From 4f1504cda5fb0c80d6db719a6ac2b3ae619d5248 Mon Sep 17 00:00:00 2001 From: Michal Jankowski Date: Wed, 7 Dec 2016 14:50:06 -0800 Subject: [PATCH 001/342] =?UTF-8?q?-=20Exercise=20SecondaryPrivateIpAddres?= =?UTF-8?q?sCount=20from=20AWS=20SDK=20-=20Update=20Terraform=E2=80=99s=20?= =?UTF-8?q?documentation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../aws/resource_aws_network_interface.go | 61 +++++++++++++++++++ .../aws/r/network_interface.markdown | 5 +- 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_network_interface.go b/builtin/providers/aws/resource_aws_network_interface.go index 5c9f8263e..857237141 100644 --- a/builtin/providers/aws/resource_aws_network_interface.go +++ b/builtin/providers/aws/resource_aws_network_interface.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "log" + "math" "strconv" "time" @@ -33,6 +34,12 @@ func resourceAwsNetworkInterface() *schema.Resource { ForceNew: true, }, + "private_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "private_ips": &schema.Schema{ Type: schema.TypeSet, Optional: true, @@ -41,6 +48,12 @@ func resourceAwsNetworkInterface() *schema.Resource { Set: schema.HashString, }, + "private_ips_count": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "security_groups": &schema.Schema{ Type: schema.TypeSet, Optional: true, @@ -110,6 +123,10 @@ func resourceAwsNetworkInterfaceCreate(d *schema.ResourceData, meta interface{}) request.Description = aws.String(v.(string)) } + if v, ok := d.GetOk("private_ips_count"); ok { + request.SecondaryPrivateIpAddressCount = aws.Int64(int64(v.(int))) + } + log.Printf("[DEBUG] Creating network interface") resp, err := conn.CreateNetworkInterface(request) if err != nil { @@ -144,6 +161,7 @@ func resourceAwsNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) e eni := describeResp.NetworkInterfaces[0] d.Set("subnet_id", eni.SubnetId) + d.Set("private_ip", eni.PrivateIpAddress) d.Set("private_ips", flattenNetworkInterfacesPrivateIPAddresses(eni.PrivateIpAddresses)) d.Set("security_groups", flattenGroupIdentifiers(eni.Groups)) d.Set("source_dest_check", eni.SourceDestCheck) @@ -300,6 +318,49 @@ func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{}) d.SetPartial("source_dest_check") + if d.HasChange("private_ips_count") { + o, n := d.GetChange("private_ips_count") + private_ips := d.Get("private_ips").(*schema.Set).List() + private_ips_filtered := private_ips[:0] + primary_ip := d.Get("private_ip") + + for _, ip := range private_ips { + if ip != primary_ip { + private_ips_filtered = append(private_ips_filtered, ip) + } + } + + if o != nil && o != 0 && n != nil && n != len(private_ips_filtered) { + + diff := n.(int) - o.(int) + + // Surplus of IPs, add the diff + if diff > 0 { + input := &ec2.AssignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String(d.Id()), + SecondaryPrivateIpAddressCount: aws.Int64(int64(diff)), + } + _, err := conn.AssignPrivateIpAddresses(input) + if err != nil { + return fmt.Errorf("Failure to assign Private IPs: %s", err) + } + } + + if diff < 0 { + input := &ec2.UnassignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String(d.Id()), + PrivateIpAddresses: expandStringList(private_ips_filtered[0:int(math.Abs(float64(diff)))]), + } + _, err := conn.UnassignPrivateIpAddresses(input) + if err != nil { + return fmt.Errorf("Failure to unassign Private IPs: %s", err) + } + } + + 
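+			// Note: assignment is count-based (EC2 chooses which addresses to
+			// add via SecondaryPrivateIpAddressCount), while unassignment must
+			// list explicit addresses, which is why the primary IP is filtered
+			// out of private_ips above before picking addresses to release.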
+			d.SetPartial("private_ips_count")
+		}
+	}
+
 	if d.HasChange("security_groups") {
 		request := &ec2.ModifyNetworkInterfaceAttributeInput{
 			NetworkInterfaceId: aws.String(d.Id()),
diff --git a/website/source/docs/providers/aws/r/network_interface.markdown b/website/source/docs/providers/aws/r/network_interface.markdown
index e52033ed3..636d1bcb4 100644
--- a/website/source/docs/providers/aws/r/network_interface.markdown
+++ b/website/source/docs/providers/aws/r/network_interface.markdown
@@ -31,6 +31,7 @@ The following arguments are supported:
 * `subnet_id` - (Required) Subnet ID to create the ENI in.
 * `description` - (Optional) A description for the network interface.
 * `private_ips` - (Optional) List of private IPs to assign to the ENI.
+* `private_ips_count` - (Optional) Number of secondary private IPs to assign to the ENI.
 * `security_groups` - (Optional) List of security group IDs to assign to the ENI.
 * `attachment` - (Optional) Block to define the attachment of the ENI. Documented below.
 * `source_dest_check` - (Optional) Whether to enable source destination checking for the ENI. Default true.
@@ -57,8 +58,8 @@ The following attributes are exported:
 
 ## Import
 
-Network Interfaces can be imported using the `id`, e.g. 
+Network Interfaces can be imported using the `id`, e.g.
 
 ```
 $ terraform import aws_network_interface.test eni-e5aa89a3
-```
\ No newline at end of file
+```

From 411db71d7018891b8370eaf59b66b47e4617a40c Mon Sep 17 00:00:00 2001
From: Michal Jankowski
Date: Wed, 7 Dec 2016 17:55:11 -0800
Subject: [PATCH 002/342] - Add a simple resource to attach an ENI to an
 instance - Add proper documentation

---
 builtin/providers/aws/provider.go             |   1 +
 ...source_aws_network_interface_attachment.go | 116 ++++++++++++++++++
 .../r/network_interface_attachment.markdown   |  36 ++++++
 3 files changed, 153 insertions(+)
 create mode 100644 builtin/providers/aws/resource_aws_network_interface_attachment.go
 create mode 100644 website/source/docs/providers/aws/r/network_interface_attachment.markdown

diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index 7b7aaabd5..19367de98 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -298,6 +298,7 @@ func Provider() terraform.ResourceProvider {
 			"aws_default_route_table":           resourceAwsDefaultRouteTable(),
 			"aws_network_acl_rule":              resourceAwsNetworkAclRule(),
 			"aws_network_interface":             resourceAwsNetworkInterface(),
+			"aws_network_interface_attachment":  resourceAwsNetworkInterfaceAttachment(),
 			"aws_opsworks_application":          resourceAwsOpsworksApplication(),
 			"aws_opsworks_stack":                resourceAwsOpsworksStack(),
 			"aws_opsworks_java_app_layer":       resourceAwsOpsworksJavaAppLayer(),
diff --git a/builtin/providers/aws/resource_aws_network_interface_attachment.go b/builtin/providers/aws/resource_aws_network_interface_attachment.go
new file mode 100644
index 000000000..a3bd33425
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_network_interface_attachment.go
@@ -0,0 +1,116 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsNetworkInterfaceAttachment() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsNetworkInterfaceAttachmentCreate,
+		Read:   resourceAwsNetworkInterfaceRead,
+		Delete: resourceAwsNetworkInterfaceAttachmentDelete,
+
+		Schema: map[string]*schema.Schema{
+			"device_index": &schema.Schema{
+				Type:     schema.TypeInt,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"instance_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"network_interface_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+		},
+	}
+}
+
+func resourceAwsNetworkInterfaceAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	device_index := d.Get("device_index").(int)
+	instance_id := d.Get("instance_id").(string)
+	network_interface_id := d.Get("network_interface_id").(string)
+
+	opts := &ec2.AttachNetworkInterfaceInput{
+		DeviceIndex:        aws.Int64(int64(device_index)),
+		InstanceId:         aws.String(instance_id),
+		NetworkInterfaceId: aws.String(network_interface_id),
+	}
+
+	log.Printf("[DEBUG] Attaching network interface (%s) to instance (%s)", network_interface_id, instance_id)
+	resp, err := conn.AttachNetworkInterface(opts)
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok {
+			return fmt.Errorf("[WARN] Error attaching network interface (%s) to instance (%s), message: \"%s\", code: \"%s\"",
+				network_interface_id, instance_id, awsErr.Message(), awsErr.Code())
+		}
+		return err
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"false"},
+		Target:     []string{"true"},
+		Refresh:    networkInterfaceAttachmentRefreshFunc(conn, network_interface_id),
+		Timeout:    5 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for network interface (%s) to attach to instance (%s): %s", network_interface_id, instance_id, err)
+	}
+
+	d.SetId(*resp.AttachmentId)
+	return resourceAwsNetworkInterfaceRead(d, meta)
+}
+
+func resourceAwsNetworkInterfaceAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	network_interface_id := d.Get("network_interface_id").(string)
+
+	detach_request := &ec2.DetachNetworkInterfaceInput{
+		AttachmentId: aws.String(d.Id()),
+		Force:        aws.Bool(true),
+	}
+
+	_, detach_err := conn.DetachNetworkInterface(detach_request)
+	if detach_err != nil {
+		if awsErr, ok := detach_err.(awserr.Error); !ok || awsErr.Code() != "InvalidAttachmentID.NotFound" {
+			return fmt.Errorf("Error detaching ENI: %s", detach_err)
+		}
+	}
+
+	log.Printf("[DEBUG] Waiting for ENI (%s) to become detached", network_interface_id)
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"true"},
+		Target:  []string{"false"},
+		Refresh: networkInterfaceAttachmentRefreshFunc(conn, network_interface_id),
+		Timeout: 10 * time.Minute,
+	}
+
+	if _, err := stateConf.WaitForState(); err != nil {
+		return fmt.Errorf(
+			"Error waiting for ENI (%s) to become detached: %s", network_interface_id, err)
+	}
+
+	return nil
+}
diff --git a/website/source/docs/providers/aws/r/network_interface_attachment.markdown b/website/source/docs/providers/aws/r/network_interface_attachment.markdown
new file mode 100644
index 000000000..ae56d338b
--- /dev/null
+++ b/website/source/docs/providers/aws/r/network_interface_attachment.markdown
@@ -0,0 +1,36 @@
+---
+layout: "aws"
+page_title: "AWS: aws_network_interface_attachment"
+sidebar_current: "docs-aws-resource-network-interface-attachment"
+description: |-
+  Attaches an Elastic Network Interface (ENI) to an EC2 instance.
+---
+
+# aws\_network\_interface\_attachment
+
+Attaches an Elastic Network Interface (ENI) to an EC2 instance.
+
+## Example Usage
+
+```
+resource "aws_network_interface_attachment" "test" {
+    instance_id          = "${aws_instance.test.id}"
+    network_interface_id = "${aws_network_interface.test.id}"
+    device_index         = 0
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `instance_id` - (Required) The ID of the instance to attach the ENI to.
+* `network_interface_id` - (Required) The ID of the ENI to attach.
+* `device_index` - (Required) The device index at which to attach the ENI on the instance.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `instance_id` - Instance ID.
+* `network_interface_id` - Network interface ID.

From 86aea6b09451aa36124098692ec453a090bfb195 Mon Sep 17 00:00:00 2001
From: Gerrit Tamboer
Date: Wed, 8 Feb 2017 11:18:18 +0100
Subject: [PATCH 003/342] WIP: Allow the Vault provider to read values

---
 .../vault/resource_generic_secret.go          | 47 ++++++++++++++++++-
 1 file changed, 45 insertions(+), 2 deletions(-)

diff --git a/builtin/providers/vault/resource_generic_secret.go b/builtin/providers/vault/resource_generic_secret.go
index a2a820c74..e7c310df1 100644
--- a/builtin/providers/vault/resource_generic_secret.go
+++ b/builtin/providers/vault/resource_generic_secret.go
@@ -31,6 +31,25 @@ func genericSecretResource() *schema.Resource {
 				Type:        schema.TypeString,
 				Required:    true,
 				Description: "JSON-encoded secret data to write.",
+				// We rebuild the attached JSON string to a simple single-line
+				// string. This prevents Terraform from detecting a change when
+				// an extra space is included in the JSON string. It is also
+				// necessary for comparing values when allow_read is true.
+				StateFunc: func(v interface{}) string {
+					var dat map[string]interface{}
+					if err := json.Unmarshal([]byte(v.(string)), &dat); err != nil {
+						return v.(string)
+					}
+					jsonDataBytes, _ := json.Marshal(dat)
+					return string(jsonDataBytes)
+				},
+			},
+
+			"allow_read": &schema.Schema{
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: "True if the provided token is allowed to read the secret from vault, and therefore canupdate values",
+			},
 		},
 	}
 }
@@ -73,6 +92,31 @@ func genericSecretResourceDelete(d *schema.ResourceData, meta interface{}) error
 }
 
 func genericSecretResourceRead(d *schema.ResourceData, meta interface{}) error {
+	allowed_to_read := d.Get("allow_read").(bool)
+
+	if allowed_to_read {
+		path := d.Get("path").(string)
+
+		client := meta.(*api.Client)
+
+		log.Printf("[DEBUG] Reading %s from Vault", path)
+		secret, err := client.Logical().Read(path)
+		if err != nil {
+			return fmt.Errorf("error reading from Vault: %s", err)
+		}
+
+		d.SetId(path)
+
+		// Ignoring error because this value came from JSON in the
+		// first place so no reason why it should fail to re-encode.
+		jsonDataBytes, _ := json.Marshal(secret.Data)
+		d.Set("data_json", string(jsonDataBytes))
+	}
+
+	path := d.Get("path").(string)
+	d.SetId(path)
+	return nil
+
 	// We don't actually attempt to read back the secret data
 	// here, so that Terraform can be configured with a token
 	// that has only write access to the relevant part of the
@@ -82,6 +126,5 @@ func genericSecretResourceRead(d *schema.ResourceData, meta interface{}) error {
 	// generic secrets, but detecting drift seems less important
 	// than being able to limit the effect of exposure of
 	// Terraform's Vault token.
-	log.Printf("[WARN] vault_generic_secret does not automatically refresh")
-	return nil
+	// log.Printf("[WARN] vault_generic_secret does not automatically refresh")
 }

From c7eee62b7b9ec81a8efcf6d809584dbaadad95e7 Mon Sep 17 00:00:00 2001
From: Gerrit Tamboer
Date: Wed, 8 Feb 2017 13:08:39 +0100
Subject: [PATCH 004/342] Finished the read implementation for vault generic
 secret

---
 .../vault/resource_generic_secret.go          | 45 +++++++++++++------
 1 file changed, 32 insertions(+), 13 deletions(-)

diff --git a/builtin/providers/vault/resource_generic_secret.go b/builtin/providers/vault/resource_generic_secret.go
index e7c310df1..79aed5afe 100644
--- a/builtin/providers/vault/resource_generic_secret.go
+++ b/builtin/providers/vault/resource_generic_secret.go
@@ -35,14 +35,8 @@ func genericSecretResource() *schema.Resource {
 				// string. This prevents Terraform from detecting a change when
 				// an extra space is included in the JSON string. It is also
 				// necessary for comparing values when allow_read is true.
-				StateFunc: func(v interface{}) string {
-					var dat map[string]interface{}
-					if err := json.Unmarshal([]byte(v.(string)), &dat); err != nil {
-						return v.(string)
-					}
-					jsonDataBytes, _ := json.Marshal(dat)
-					return string(jsonDataBytes)
-				},
+				StateFunc:    NormalizeDataJSON,
+				ValidateFunc: ValidateDataJSON,
 			},
 
 			"allow_read": &schema.Schema{
@@ -53,6 +47,35 @@ func genericSecretResource() *schema.Resource {
 	}
 }
 
+func ValidateDataJSON(configI interface{}, k string) ([]string, []error) {
+	dataJSON := configI.(string)
+	dataMap := map[string]interface{}{}
+	err := json.Unmarshal([]byte(dataJSON), &dataMap)
+	if err != nil {
+		return nil, []error{err}
+	}
+	return nil, nil
+}
+
+func NormalizeDataJSON(configI interface{}) string {
+	dataJSON := configI.(string)
+
+	dataMap := map[string]interface{}{}
+	err := json.Unmarshal([]byte(dataJSON), &dataMap)
+	if err != nil {
+		// The validate function should've taken care of this.
+		return ""
+	}
+
+	ret, err := json.Marshal(dataMap)
+	if err != nil {
+		// Should never happen.
+		return dataJSON
+	}
+
+	return string(ret)
+}
+
 func genericSecretResourceWrite(d *schema.ResourceData, meta interface{}) error {
 	client := meta.(*api.Client)
 
@@ -93,10 +116,9 @@ func genericSecretResourceDelete(d *schema.ResourceData, meta interface{}) error
 }
 
 func genericSecretResourceRead(d *schema.ResourceData, meta interface{}) error {
 	allowed_to_read := d.Get("allow_read").(bool)
+	path := d.Get("path").(string)
 
 	if allowed_to_read {
-		path := d.Get("path").(string)
-
 		client := meta.(*api.Client)
 
 		log.Printf("[DEBUG] Reading %s from Vault", path)
 		secret, err := client.Logical().Read(path)
 		if err != nil {
 			return fmt.Errorf("error reading from Vault: %s", err)
 		}
 
-		d.SetId(path)
-
 		// Ignoring error because this value came from JSON in the
 		// first place so no reason why it should fail to re-encode.
jsonDataBytes, _ := json.Marshal(secret.Data) d.Set("data_json", string(jsonDataBytes)) } - path := d.Get("path").(string) d.SetId(path) return nil From 369c81007255787dceba45d0d790263821f03ea0 Mon Sep 17 00:00:00 2001 From: Gerrit Tamboer Date: Wed, 8 Feb 2017 13:37:37 +0100 Subject: [PATCH 005/342] Updated documentation --- builtin/providers/vault/resource_generic_secret.go | 14 ++------------ .../vault/resource_generic_secret_test.go | 2 ++ .../docs/providers/vault/r/generic_secret.html.md | 9 +++++++-- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/builtin/providers/vault/resource_generic_secret.go b/builtin/providers/vault/resource_generic_secret.go index 79aed5afe..cbb8deda0 100644 --- a/builtin/providers/vault/resource_generic_secret.go +++ b/builtin/providers/vault/resource_generic_secret.go @@ -43,7 +43,7 @@ func genericSecretResource() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: false, - Description: "True if the provided token is allowed to read the secret from vault, and therefore canupdate values", + Description: "True if the provided token is allowed to read the secret from vault", }, }, } @@ -134,16 +134,6 @@ func genericSecretResourceRead(d *schema.ResourceData, meta interface{}) error { } d.SetId(path) + log.Printf("[WARN] vault_generic_secret does not automatically refresh if allow_read is set to false") return nil - - // We don't actually attempt to read back the secret data - // here, so that Terraform can be configured with a token - // that has only write access to the relevant part of the - // store. - // - // This means that Terraform cannot detect drift for - // generic secrets, but detecting drift seems less important - // than being able to limit the effect of exposure of - // Terraform's Vault token. 
- // log.Printf("[WARN] vault_generic_secret does not automatically refresh") } diff --git a/builtin/providers/vault/resource_generic_secret_test.go b/builtin/providers/vault/resource_generic_secret_test.go index 7636565cd..5acaac9b1 100644 --- a/builtin/providers/vault/resource_generic_secret_test.go +++ b/builtin/providers/vault/resource_generic_secret_test.go @@ -31,6 +31,7 @@ var testResourceGenericSecret_initialConfig = ` resource "vault_generic_secret" "test" { path = "secret/foo" + allow_read = true data_json = < Date: Wed, 8 Mar 2017 12:36:01 -0800 Subject: [PATCH 006/342] Add coalescelist interpolation function --- config/interpolate_funcs.go | 25 +++++++++++++++++ config/interpolate_funcs_test.go | 27 +++++++++++++++++++ .../docs/configuration/interpolation.html.md | 3 +++ 3 files changed, 55 insertions(+) diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index ad543c308..d9a357a1d 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -60,6 +60,7 @@ func Funcs() map[string]ast.Function { "cidrnetmask": interpolationFuncCidrNetmask(), "cidrsubnet": interpolationFuncCidrSubnet(), "coalesce": interpolationFuncCoalesce(), + "coalescelist": interpolationFuncCoalesceList(), "compact": interpolationFuncCompact(), "concat": interpolationFuncConcat(), "distinct": interpolationFuncDistinct(), @@ -318,6 +319,30 @@ func interpolationFuncCoalesce() ast.Function { } } +// interpolationFuncCoalesceList implements the "coalescelist" function that +// returns the first non empty list from the provided input +func interpolationFuncCoalesceList() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeList}, + ReturnType: ast.TypeList, + Variadic: true, + VariadicType: ast.TypeList, + Callback: func(args []interface{}) (interface{}, error) { + if len(args) < 2 { + return nil, fmt.Errorf("must provide at least two arguments") + } + for _, arg := range args { + argument := arg.([]ast.Variable) + + if len(argument) > 0 { + return argument, nil + } + } + return make([]ast.Variable, 0), nil + }, + } +} + // interpolationFuncConcat implements the "concat" function that concatenates // multiple lists. func interpolationFuncConcat() ast.Function { diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index 193fcd147..4f1c3fa0a 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -615,6 +615,33 @@ func TestInterpolateFuncCoalesce(t *testing.T) { }) } +func TestInterpolateFuncCoalesceList(t *testing.T) { + testFunction(t, testFunctionConfig{ + Cases: []testFunctionCase{ + { + `${coalescelist(list("first"), list("second"), list("third"))}`, + []interface{}{"first"}, + false, + }, + { + `${coalescelist(list(), list("second"), list("third"))}`, + []interface{}{"second"}, + false, + }, + { + `${coalescelist(list(), list(), list())}`, + []interface{}{}, + false, + }, + { + `${coalescelist(list("foo"))}`, + nil, + true, + }, + }, + }) +} + func TestInterpolateFuncConcat(t *testing.T) { testFunction(t, testFunctionConfig{ Cases: []testFunctionCase{ diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index 2d1e3052b..cb7ad8cfe 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -169,6 +169,9 @@ The supported built-in functions are: * `coalesce(string1, string2, ...)` - Returns the first non-empty value from the given arguments. 
At least two arguments must be provided. + * `coalescelist(list1, list2, ...)` - Returns the first non-empty list from + the given arguments. At least two arguments must be provided. + * `compact(list)` - Removes empty string elements from a list. This can be useful in some cases, for example when passing joined lists as module variables or when parsing module outputs. From c5d1f8f88f26b63d1164bbe79f6399e43c393dcf Mon Sep 17 00:00:00 2001 From: Stephen Cross Date: Wed, 29 Mar 2017 16:30:08 +0000 Subject: [PATCH 007/342] Initial Oracle Compute Cloud provider --- builtin/providers/oracleopc/config.go | 47 +++ builtin/providers/oracleopc/provider.go | 75 +++++ builtin/providers/oracleopc/provider_test.go | 61 ++++ .../providers/oracleopc/resource_instance.go | 306 ++++++++++++++++++ .../oracleopc/resource_instance_test.go | 156 +++++++++ .../oracleopc/resource_ip_association.go | 103 ++++++ .../oracleopc/resource_ip_association_test.go | 74 +++++ .../oracleopc/resource_ip_reservation.go | 122 +++++++ .../resource_security_application.go | 124 +++++++ .../resource_security_association.go | 103 ++++++ .../resource_security_association_test.go | 75 +++++ .../oracleopc/resource_security_ip_list.go | 117 +++++++ .../oracleopc/resource_security_list.go | 119 +++++++ .../oracleopc/resource_security_rule.go | 143 ++++++++ .../oracleopc/resource_security_rule_test.go | 85 +++++ .../providers/oracleopc/resource_ssh_key.go | 117 +++++++ .../oracleopc/resource_storage_volume.go | 301 +++++++++++++++++ .../oracleopc/resource_storage_volume_test.go | 70 ++++ 18 files changed, 2198 insertions(+) create mode 100644 builtin/providers/oracleopc/config.go create mode 100644 builtin/providers/oracleopc/provider.go create mode 100644 builtin/providers/oracleopc/provider_test.go create mode 100644 builtin/providers/oracleopc/resource_instance.go create mode 100644 builtin/providers/oracleopc/resource_instance_test.go create mode 100644 builtin/providers/oracleopc/resource_ip_association.go create mode 100644 builtin/providers/oracleopc/resource_ip_association_test.go create mode 100644 builtin/providers/oracleopc/resource_ip_reservation.go create mode 100644 builtin/providers/oracleopc/resource_security_application.go create mode 100644 builtin/providers/oracleopc/resource_security_association.go create mode 100644 builtin/providers/oracleopc/resource_security_association_test.go create mode 100644 builtin/providers/oracleopc/resource_security_ip_list.go create mode 100644 builtin/providers/oracleopc/resource_security_list.go create mode 100644 builtin/providers/oracleopc/resource_security_rule.go create mode 100644 builtin/providers/oracleopc/resource_security_rule_test.go create mode 100644 builtin/providers/oracleopc/resource_ssh_key.go create mode 100644 builtin/providers/oracleopc/resource_storage_volume.go create mode 100644 builtin/providers/oracleopc/resource_storage_volume_test.go diff --git a/builtin/providers/oracleopc/config.go b/builtin/providers/oracleopc/config.go new file mode 100644 index 000000000..fbae3b5d5 --- /dev/null +++ b/builtin/providers/oracleopc/config.go @@ -0,0 +1,47 @@ +package opc + +import ( + "fmt" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "net/url" +) + +type Config struct { + User string + Password string + IdentityDomain string + Endpoint string + MaxRetryTimeout int +} + +type storageAttachment struct { + index int + instanceName *compute.InstanceName +} + +type OPCClient struct { + *compute.AuthenticatedClient + MaxRetryTimeout int + 
storageAttachmentsByVolumeCache map[string][]storageAttachment +} + +func (c *Config) Client() (*OPCClient, error) { + u, err := url.ParseRequestURI(c.Endpoint) + if err != nil { + return nil, fmt.Errorf("Invalid endpoint URI: %s", err) + } + + client := compute.NewComputeClient(c.IdentityDomain, c.User, c.Password, u) + authenticatedClient, err := client.Authenticate() + if err != nil { + return nil, fmt.Errorf("Authentication failed: %s", err) + } + + opcClient := &OPCClient{ + AuthenticatedClient: authenticatedClient, + MaxRetryTimeout: c.MaxRetryTimeout, + storageAttachmentsByVolumeCache: make(map[string][]storageAttachment), + } + + return opcClient, nil +} diff --git a/builtin/providers/oracleopc/provider.go b/builtin/providers/oracleopc/provider.go new file mode 100644 index 000000000..a6d0d3fb5 --- /dev/null +++ b/builtin/providers/oracleopc/provider.go @@ -0,0 +1,75 @@ +package opc + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns a terraform.ResourceProvider. +func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "user": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_USERNAME", nil), + Description: "The user name for OPC API operations.", + }, + + "password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_PASSWORD", nil), + Description: "The user password for OPC API operations.", + }, + + "identityDomain": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_IDENTITY_DOMAIN", nil), + Description: "The OPC identity domain for API operations", + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_ENDPOINT", nil), + Description: "The HTTP endpoint for OPC API operations.", + }, + + "maxRetryTimeout": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_MAX_RETRY_TIMEOUT", 3000), + Description: "Max num seconds to wait for successful response when operating on resources within OPC (defaults to 3000)", + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "opc_compute_storage_volume": resourceStorageVolume(), + "opc_compute_instance": resourceInstance(), + "opc_compute_ssh_key": resourceSSHKey(), + "opc_compute_security_application": resourceSecurityApplication(), + "opc_compute_security_list": resourceSecurityList(), + "opc_compute_security_ip_list": resourceSecurityIPList(), + "opc_compute_ip_reservation": resourceIPReservation(), + "opc_compute_ip_association": resourceIPAssociation(), + "opc_compute_security_rule": resourceSecurityRule(), + "opc_compute_security_association": resourceSecurityAssociation(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + User: d.Get("user").(string), + Password: d.Get("password").(string), + IdentityDomain: d.Get("identityDomain").(string), + Endpoint: d.Get("endpoint").(string), + MaxRetryTimeout: d.Get("maxRetryTimeout").(int), + } + + return config.Client() +} diff --git a/builtin/providers/oracleopc/provider_test.go b/builtin/providers/oracleopc/provider_test.go new file mode 100644 index 000000000..c60076b06 --- /dev/null +++ b/builtin/providers/oracleopc/provider_test.go @@ -0,0 +1,61 @@ +package opc + +import ( + "os" + "testing" 
+ + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "opc": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + required := []string{"OPC_USERNAME", "OPC_PASSWORD", "OPC_IDENTITY_DOMAIN", "OPC_ENDPOINT"} + for _, prop := range required { + if os.Getenv(prop) == "" { + t.Fatalf("%s must be set for acceptance test", prop) + } + } +} + +type OPCResourceState struct { + *OPCClient + *terraform.InstanceState +} + +func opcResourceCheck(resourceName string, f func(checker *OPCResourceState) error) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Resource not found: %s", resourceName) + } + + state := &OPCResourceState{ + OPCClient: testAccProvider.Meta().(*OPCClient), + InstanceState: rs.Primary, + } + + return f(state) + } +} diff --git a/builtin/providers/oracleopc/resource_instance.go b/builtin/providers/oracleopc/resource_instance.go new file mode 100644 index 000000000..70f3b99c8 --- /dev/null +++ b/builtin/providers/oracleopc/resource_instance.go @@ -0,0 +1,306 @@ +package opc + +import ( + "encoding/json" + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceInstanceCreate, + Read: resourceInstanceRead, + Delete: resourceInstanceDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "shape": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "imageList": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "label": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "ip": { + Type: schema.TypeString, + Optional: false, + Computed: true, + }, + + "opcId": { + Type: schema.TypeString, + Optional: false, + Computed: true, + }, + + "sshKeys": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "attributes": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "vcable": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "storage": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "index": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "volume": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "bootOrder": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + }, + } +} + +func getAttrs(d *schema.ResourceData) (*map[string]interface{}, error) { + var attrs map[string]interface{} + + attrString := d.Get("attributes").(string) + if attrString == "" { + 
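+		// No attributes block was configured; return the (nil) map as-is.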
return &attrs, nil + } + if err := json.Unmarshal([]byte(attrString), &attrs); err != nil { + return &attrs, fmt.Errorf("Cannot parse '%s' as json", attrString) + } + return &attrs, nil +} + +func resourceInstanceCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d.State()) + + client := meta.(*OPCClient).Instances() + name := d.Get("name").(string) + shape := d.Get("shape").(string) + imageList := d.Get("imageList").(string) + label := d.Get("label").(string) + storage := getStorageAttachments(d) + sshKeys := getSSHKeys(d) + bootOrder := getBootOrder(d) + + attrs, err := getAttrs(d) + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating instance with name %s, shape %s, imageList %s, storage %s, bootOrder %s, label %s, sshKeys %s, attrs %#v", + name, shape, imageList, storage, bootOrder, label, sshKeys, attrs) + + id, err := client.LaunchInstance(name, label, shape, imageList, storage, bootOrder, sshKeys, *attrs) + if err != nil { + return fmt.Errorf("Error creating instance %s: %s", name, err) + } + + log.Printf("[DEBUG] Waiting for instance %s to come online", id.String()) + info, err := client.WaitForInstanceRunning(id, meta.(*OPCClient).MaxRetryTimeout) + if err != nil { + return fmt.Errorf("Error waiting for instance %s to come online: %s", id, err) + } + + log.Printf("[DEBUG] Created instance %s: %#v", id, info) + + attachStorage( + &compute.InstanceName{ + Name: info.Name, + ID: info.ID, + }, + d, meta) + + d.SetId(info.Name) + updateInstanceResourceData(d, info) + return nil +} + +func attachStorage(name *compute.InstanceName, d *schema.ResourceData, meta interface{}) error { + storageClient := meta.(*OPCClient).StorageAttachments() + storage := d.Get("storage").(*schema.Set) + updatedStorage := schema.NewSet(storage.F, []interface{}{}) + + for _, i := range storage.List() { + attrs := i.(map[string]interface{}) + attachmentInfo, err := storageClient.CreateStorageAttachment( + attrs["index"].(int), + name, + attrs["volume"].(string)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Waiting for storage attachment %#v to come online", attachmentInfo) + storageClient.WaitForStorageAttachmentCreated(attachmentInfo.Name, meta.(*OPCClient).MaxRetryTimeout) + log.Printf("[DEBUG] Storage attachment %s: %s-%s created", + attachmentInfo.Name, attachmentInfo.InstanceName, attachmentInfo.StorageVolumeName) + attrs["name"] = attachmentInfo.Name + updatedStorage.Add(attrs) + } + + d.Set("storage", updatedStorage) + return nil +} + +func getSSHKeys(d *schema.ResourceData) []string { + sshKeys := []string{} + for _, i := range d.Get("sshKeys").([]interface{}) { + sshKeys = append(sshKeys, i.(string)) + } + return sshKeys +} + +func getBootOrder(d *schema.ResourceData) []int { + bootOrder := []int{} + for _, i := range d.Get("bootOrder").([]interface{}) { + bootOrder = append(bootOrder, i.(int)) + } + return bootOrder +} + +func getStorageAttachments(d *schema.ResourceData) []compute.LaunchPlanStorageAttachmentSpec { + storageAttachments := []compute.LaunchPlanStorageAttachmentSpec{} + storage := d.Get("storage").(*schema.Set) + for _, i := range storage.List() { + attrs := i.(map[string]interface{}) + storageAttachments = append(storageAttachments, compute.LaunchPlanStorageAttachmentSpec{ + Index: attrs["index"].(int), + Volume: attrs["volume"].(string), + }) + } + return storageAttachments +} + +func updateInstanceResourceData(d *schema.ResourceData, info *compute.InstanceInfo) error { + d.Set("name", info.Name) + 
d.Set("opcId", info.ID) + d.Set("imageList", info.ImageList) + d.Set("bootOrder", info.BootOrder) + d.Set("sshKeys", info.SSHKeys) + d.Set("label", info.Label) + d.Set("ip", info.IPAddress) + d.Set("vcable", info.VCableID) + + return nil +} + +func resourceInstanceRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d.State()) + client := meta.(*OPCClient).Instances() + name := d.Get("name").(string) + instanceName := &compute.InstanceName{ + Name: name, + ID: d.Get("opcId").(string), + } + + log.Printf("[DEBUG] Reading state of instance %s", instanceName) + result, err := client.GetInstance(instanceName) + if err != nil { + // Instance doesn't exist + if compute.WasNotFoundError(err) { + log.Printf("[DEBUG] Instance %s not found", instanceName) + d.SetId("") + return nil + } + return fmt.Errorf("Error reading instance %s: %s", instanceName, err) + } + + log.Printf("[DEBUG] Read state of instance %s: %#v", instanceName, result) + + attachments, err := meta.(*OPCClient).StorageAttachments().GetStorageAttachmentsForInstance(instanceName) + if err != nil { + return fmt.Errorf("Error reading storage attachments for instance %s: %s", instanceName, err) + } + updateInstanceResourceData(d, result) + updateAttachmentResourceData(d, attachments) + return nil +} + +func updateAttachmentResourceData(d *schema.ResourceData, attachments *[]compute.StorageAttachmentInfo) { + attachmentSet := schema.NewSet(d.Get("storage").(*schema.Set).F, []interface{}{}) + for _, attachment := range *attachments { + properties := map[string]interface{}{ + "index": attachment.Index, + "volume": attachment.StorageVolumeName, + "name": attachment.Name, + } + attachmentSet.Add(properties) + } + d.Set("storage", attachmentSet) +} + +func resourceInstanceDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d.State()) + client := meta.(*OPCClient).Instances() + name := d.Get("name").(string) + + instanceName := &compute.InstanceName{ + Name: name, + ID: d.Get("opcId").(string), + } + + log.Printf("[DEBUG] Deleting instance %s", instanceName) + if err := client.DeleteInstance(instanceName); err != nil { + return fmt.Errorf("Error deleting instance %s: %s", instanceName, err) + } + if err := client.WaitForInstanceDeleted(instanceName, meta.(*OPCClient).MaxRetryTimeout); err != nil { + return fmt.Errorf("Error deleting instance %s: %s", instanceName, err) + } + + for _, attachment := range d.Get("storage").(*schema.Set).List() { + name := attachment.(map[string]interface{})["name"].(string) + log.Printf("[DEBUG] Deleting storage attachment %s", name) + client.StorageAttachments().DeleteStorageAttachment(name) + client.StorageAttachments().WaitForStorageAttachmentDeleted(name, meta.(*OPCClient).MaxRetryTimeout) + } + + return nil +} diff --git a/builtin/providers/oracleopc/resource_instance_test.go b/builtin/providers/oracleopc/resource_instance_test.go new file mode 100644 index 000000000..6f386af84 --- /dev/null +++ b/builtin/providers/oracleopc/resource_instance_test.go @@ -0,0 +1,156 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "testing" +) + +func TestAccOPCInstance_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck( + instanceResourceName, + testAccCheckInstanceDestroyed), + Steps: []resource.TestStep{ + { + 
Config: testAccInstanceBasic, + Check: resource.ComposeTestCheckFunc( + opcResourceCheck( + instanceResourceName, + testAccCheckInstanceExists), + opcResourceCheck( + keyResourceName, + testAccCheckSSHKeyExists), + ), + }, + { + Config: modifySSHKey, + Check: resource.ComposeTestCheckFunc( + opcResourceCheck( + instanceResourceName, + testAccCheckInstanceExists), + opcResourceCheck( + keyResourceName, + testAccCheckSSHKeyUpdated), + ), + }, + }, + }) +} + +func testAccCheckInstanceExists(state *OPCResourceState) error { + instanceName := getInstanceName(state) + + if _, err := state.Instances().GetInstance(instanceName); err != nil { + return fmt.Errorf("Error retrieving state of instance %s: %s", instanceName, err) + } + + return nil +} + +func testAccCheckSSHKeyExists(state *OPCResourceState) error { + keyName := state.Attributes["name"] + + if _, err := state.SSHKeys().GetSSHKey(keyName); err != nil { + return fmt.Errorf("Error retrieving state of key %s: %s", keyName, err) + } + + return nil +} + +func testAccCheckSSHKeyUpdated(state *OPCResourceState) error { + keyName := state.Attributes["name"] + info, err := state.SSHKeys().GetSSHKey(keyName) + if err != nil { + return err + } + if info.Key != updatedKey { + return fmt.Errorf("Expected key\n\t%s\nbut was\n\t%s", updatedKey, info.Key) + } + return nil +} + +func getInstanceName(rs *OPCResourceState) *compute.InstanceName { + return &compute.InstanceName{ + Name: rs.Attributes["name"], + ID: rs.Attributes["opcId"], + } +} + +func testAccCheckInstanceDestroyed(state *OPCResourceState) error { + instanceName := getInstanceName(state) + if info, err := state.Instances().GetInstance(instanceName); err == nil { + return fmt.Errorf("Instance %s still exists: %#v", instanceName, info) + } + + return nil +} + +const instanceName = "test_instance" +const keyName = "test_key" + +var instanceResourceName = fmt.Sprintf("opc_compute_instance.%s", instanceName) +var keyResourceName = fmt.Sprintf("opc_compute_ssh_key.%s", keyName) + +const originalKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIkZEr5UcMojtxhk6Zum39NOihHNXEvRWDt5WssX8TH/ghpv3D25K1pJkf+wfAi17HwEmYwPMEyEHENS443v6RZbXvzCkUWzkJzq7Zvbdqld038km31La2QUoMMp1KL5zk1nM65xCeQDVcR/h++03EScB2CuzTpAV6khMdfgOJgxm361kfrDVRwc1HQrAOpOnzkpPfwqBrYWqN1UnKvuO77Wk8z5LBe03EPNru3bLE3s3qHI9hjO0gXMiVUi0KyNxdWfDO8esqQlKavHAeePyrRA55YF8kBB5dEl4tVNOqpY/8TRnGN1mOe0LWxa8Ytz1wbyS49knsNVTel" +const updatedKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDHvb/2OSemgzUYLNW1/T3u33r7sZy1qbWtgVWiREH4gS5TVmDVPuvN1MFLdNqiWQA53gK8Gp24jtjNm9ftcPhicv81HVWJTB69C0sJGEfF0l4mgbemJLH3i37Mb6SdWJcGof9qHVDADPgiC8jIBVUhdiJSeq4fUJ3NQA2eUExBkRglQWairkNzPNA0mi3GL9KDGnoBnSCAXNGoKgDgIOqW0dYFP6oHyGWkF7V+/TME9aIQvmMpHjVzl7brZ/wED2t5vTJxxbgogHEmWnfs7p8EP5IsN6Vnjd0VNIt1tu3TduS8kH5npkPqZz8oIP93Ypxn0l7ZNEl9MahbhPj3gJ1YY7Cygrlt1VLC1ibBbOgIS2Lj6vGG/Yjkqs3Vw6qrmTRlsJ9c6bZO2xq0xzV11XQHvjPegBOClF6AztEe1jKU/RUFnzjIF8lUmM63fTaXuVkNERkTSE3E9XL3Uq6eqYdef7wHFFhCMSGotp3ANAb30kflysA9ID0b3o5QU2tB8OBxBicXQy11lh+u204YJuvIzeTXo+JAad5TWFlJcsUlbPFppLQdhUpoWaJouBGJV36DJb9R34i9T8Ze5tnJUQgPmMkERyPvb/+v5j3s2hs1A9WO6/MqmZd70gudsX/1bqWT898vCCOdM+CspNVY7nHVUtde7C6BrHzphr/C1YBXHw==" + +var testAccInstanceBasic = fmt.Sprintf(` +resource "opc_compute_instance" "%s" { + name = "test" + label = "test" + shape = "oc3" + imageList = "/oracle/public/oel_6.4_2GB_v1" + sshKeys = ["${opc_compute_ssh_key.test_key.name}"] + attributes = "{\"foo\": \"bar\"}" + storage = { + index = 1 + volume = "${opc_compute_storage_volume.test_volume.name}" + } +} + +resource 
"opc_compute_storage_volume" "test_volume" { + size = "3g" + description = "My volume" + name = "test_volume_b" + tags = ["foo", "bar", "baz"] +} + +resource "opc_compute_ssh_key" "%s" { + name = "test-key" + key = "%s" + enabled = true +} +`, instanceName, keyName, originalKey) + +var modifySSHKey = fmt.Sprintf(` +resource "opc_compute_instance" "%s" { + name = "test" + label = "test" + shape = "oc3" + imageList = "/oracle/public/oel_6.4_2GB_v1" + sshKeys = ["${opc_compute_ssh_key.test_key.name}"] + attributes = "{\"foo\": \"bar\"}" + storage = { + index = 1 + volume = "${opc_compute_storage_volume.test_volume.name}" + } +} + +resource "opc_compute_storage_volume" "test_volume" { + size = "3g" + description = "My volume" + name = "test_volume_b" + tags = ["foo", "bar", "baz"] +} + +resource "opc_compute_ssh_key" "%s" { + name = "test-key" + key = "%s" + enabled = true +} +`, instanceName, keyName, updatedKey) diff --git a/builtin/providers/oracleopc/resource_ip_association.go b/builtin/providers/oracleopc/resource_ip_association.go new file mode 100644 index 000000000..84df10ba8 --- /dev/null +++ b/builtin/providers/oracleopc/resource_ip_association.go @@ -0,0 +1,103 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceIPAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceIPAssociationCreate, + Read: resourceIPAssociationRead, + Delete: resourceIPAssociationDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "vcable": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "parentpool": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceIPAssociationCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + vcable, parentpool := getIPAssociationResourceData(d) + + log.Printf("[DEBUG] Creating ip association between vcable %s and parent pool %s", + vcable, parentpool) + + client := meta.(*OPCClient).IPAssociations() + info, err := client.CreateIPAssociation(vcable, parentpool) + if err != nil { + return fmt.Errorf("Error creating ip association between vcable %s and parent pool %s: %s", + vcable, parentpool, err) + } + + d.SetId(info.Name) + updateIPAssociationResourceData(d, info) + return nil +} + +func updateIPAssociationResourceData(d *schema.ResourceData, info *compute.IPAssociationInfo) { + d.Set("name", info.Name) + d.Set("parentpool", info.ParentPool) + d.Set("vcable", info.VCable) +} + +func resourceIPAssociationRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).IPAssociations() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of ip association %s", name) + result, err := client.GetIPAssociation(name) + if err != nil { + // IP Association does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading ip association %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of ip association %s: %#v", name, result) + updateIPAssociationResourceData(d, result) + return nil +} + +func getIPAssociationResourceData(d *schema.ResourceData) (string, string) { + return d.Get("vcable").(string), d.Get("parentpool").(string) +} + +func 
resourceIPAssociationDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).IPAssociations() + name := d.Get("name").(string) + + vcable, parentpool := getIPAssociationResourceData(d) + log.Printf("[DEBUG] Deleting ip association %s between vcable %s and parent pool %s", + name, vcable, parentpool) + + if err := client.DeleteIPAssociation(name); err != nil { + return fmt.Errorf("Error deleting ip association %s between vcable %s and parent pool %s: %s", + name, vcable, parentpool, err) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_ip_association_test.go b/builtin/providers/oracleopc/resource_ip_association_test.go new file mode 100644 index 000000000..44f48474f --- /dev/null +++ b/builtin/providers/oracleopc/resource_ip_association_test.go @@ -0,0 +1,74 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccOPCResourceIPAssociation_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck( + ipAssociationResourceName, + testAccCheckIPAssociationDestroyed), + Steps: []resource.TestStep{ + { + Config: testAccIPAssociationBasic, + Check: resource.ComposeTestCheckFunc( + opcResourceCheck( + ipAssociationResourceName, + testAccCheckIPAssociationExists), + ), + }, + }, + }) +} + +func testAccCheckIPAssociationExists(state *OPCResourceState) error { + associationName := getIPAssociationName(state) + + if _, err := state.IPAssociations().GetIPAssociation(associationName); err != nil { + return fmt.Errorf("Error retrieving state of ip assocation %s: %s", associationName, err) + } + + return nil +} + +func getIPAssociationName(rs *OPCResourceState) string { + return rs.Attributes["name"] +} + +func testAccCheckIPAssociationDestroyed(state *OPCResourceState) error { + associationName := getAssociationName(state) + if info, err := state.IPAssociations().GetIPAssociation(associationName); err == nil { + return fmt.Errorf("IP association %s still exists: %#v", associationName, info) + } + + return nil +} + +const ipAssociationName = "test_ip_association" + +var ipAssociationResourceName = fmt.Sprintf("opc_compute_ip_association.%s", ipAssociationName) + +var testAccIPAssociationBasic = fmt.Sprintf(` +resource "opc_compute_ip_reservation" "reservation1" { + parentpool = "/oracle/public/ippool" + permanent = true +} + +resource "opc_compute_ip_association" "%s" { + vcable = "${opc_compute_instance.test-instance1.vcable}" + parentpool = "ipreservation:${opc_compute_ip_reservation.reservation1.name}" +} + +resource "opc_compute_instance" "test-instance1" { + name = "test" + label = "test" + shape = "oc3" + imageList = "/oracle/public/oel_6.4_2GB_v1" +} +`, ipAssociationName) diff --git a/builtin/providers/oracleopc/resource_ip_reservation.go b/builtin/providers/oracleopc/resource_ip_reservation.go new file mode 100644 index 000000000..fa25679d2 --- /dev/null +++ b/builtin/providers/oracleopc/resource_ip_reservation.go @@ -0,0 +1,122 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceIPReservation() *schema.Resource { + return &schema.Resource{ + Create: resourceIPReservationCreate, + Read: resourceIPReservationRead, + Delete: resourceIPReservationDelete, + + Schema: map[string]*schema.Schema{ + "name": 
&schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "permanent": &schema.Schema{ + Type: schema.TypeBool, + Required: true, + ForceNew: true, + }, + + "parentpool": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ip": &schema.Schema{ + Type: schema.TypeString, + Optional: false, + Computed: true, + }, + }, + } +} + +func resourceIPReservationCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + parentpool, permanent, tags := getIPReservationResourceData(d) + + log.Printf("[DEBUG] Creating ip reservation from parentpool %s with tags=%s", + parentpool, tags) + + client := meta.(*OPCClient).IPReservations() + info, err := client.CreateIPReservation(parentpool, permanent, tags) + if err != nil { + return fmt.Errorf("Error creating ip reservation from parentpool %s with tags=%s: %s", + parentpool, tags, err) + } + + d.SetId(info.Name) + updateIPReservationResourceData(d, info) + return nil +} + +func updateIPReservationResourceData(d *schema.ResourceData, info *compute.IPReservationInfo) { + d.Set("name", info.Name) + d.Set("parentpool", info.ParentPool) + d.Set("permanent", info.Permanent) + d.Set("tags", info.Tags) + d.Set("ip", info.IP) +} + +func resourceIPReservationRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).IPReservations() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of ip reservation %s", name) + result, err := client.GetIPReservation(name) + if err != nil { + // IP Reservation does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading ip reservation %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of ip reservation %s: %#v", name, result) + updateIPReservationResourceData(d, result) + return nil +} + +func getIPReservationResourceData(d *schema.ResourceData) (string, bool, []string) { + tagdata := d.Get("tags").([]interface{}) + tags := make([]string, len(tagdata)) + for i, tag := range tagdata { + tags[i] = tag.(string) + } + return d.Get("parentpool").(string), + d.Get("permanent").(bool), + tags +} + +func resourceIPReservationDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).IPReservations() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting ip reservation %s", name) + + if err := client.DeleteIPReservation(name); err != nil { + return fmt.Errorf("Error deleting ip reservation %s", name) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_security_application.go b/builtin/providers/oracleopc/resource_security_application.go new file mode 100644 index 000000000..b7205754c --- /dev/null +++ b/builtin/providers/oracleopc/resource_security_application.go @@ -0,0 +1,124 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceSecurityApplication() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityApplicationCreate, + Read: resourceSecurityApplicationRead, + Delete: resourceSecurityApplicationDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ 
+ Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "dport": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "icmptype": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "icmpcode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceSecurityApplicationCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + name, protocol, dport, icmptype, icmpcode, description := getSecurityApplicationResourceData(d) + + log.Printf("[DEBUG] Creating security application %s", name) + + client := meta.(*OPCClient).SecurityApplications() + info, err := client.CreateSecurityApplication(name, protocol, dport, icmptype, icmpcode, description) + if err != nil { + return fmt.Errorf("Error creating security application %s: %s", name, err) + } + + d.SetId(info.Name) + updateSecurityApplicationResourceData(d, info) + return nil +} + +func updateSecurityApplicationResourceData(d *schema.ResourceData, info *compute.SecurityApplicationInfo) { + d.Set("name", info.Name) + d.Set("protocol", info.Protocol) + d.Set("dport", info.DPort) + d.Set("icmptype", info.ICMPType) + d.Set("icmpcode", info.ICMPCode) + d.Set("description", info.Description) +} + +func resourceSecurityApplicationRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityApplications() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of security application %s", name) + result, err := client.GetSecurityApplication(name) + if err != nil { + // Security Application does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading security application %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of security application %s: %#v", name, result) + updateSecurityApplicationResourceData(d, result) + return nil +} + +func getSecurityApplicationResourceData(d *schema.ResourceData) (string, string, string, string, string, string) { + return d.Get("name").(string), + d.Get("protocol").(string), + d.Get("dport").(string), + d.Get("icmptype").(string), + d.Get("icmpcode").(string), + d.Get("description").(string) +} + +func resourceSecurityApplicationDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityApplications() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting security application %s", name) + + if err := client.DeleteSecurityApplication(name); err != nil { + return fmt.Errorf("Error deleting security application %s: %s", name, err) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_security_association.go b/builtin/providers/oracleopc/resource_security_association.go new file mode 100644 index 000000000..15a912657 --- /dev/null +++ b/builtin/providers/oracleopc/resource_security_association.go @@ -0,0 +1,103 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceSecurityAssociation() *schema.Resource { + return &schema.Resource{ 
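+		// No Update is defined: both vcable and seclist are ForceNew below,
+		// so any change replaces the whole association.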
+ Create: resourceSecurityAssociationCreate, + Read: resourceSecurityAssociationRead, + Delete: resourceSecurityAssociationDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "vcable": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "seclist": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceSecurityAssociationCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + vcable, seclist := getSecurityAssociationResourceData(d) + + log.Printf("[DEBUG] Creating security association between vcable %s and security list %s", + vcable, seclist) + + client := meta.(*OPCClient).SecurityAssociations() + info, err := client.CreateSecurityAssociation(vcable, seclist) + if err != nil { + return fmt.Errorf("Error creating security association between vcable %s and security list %s: %s", + vcable, seclist, err) + } + + d.SetId(info.Name) + updateSecurityAssociationResourceData(d, info) + return nil +} + +func updateSecurityAssociationResourceData(d *schema.ResourceData, info *compute.SecurityAssociationInfo) { + d.Set("name", info.Name) + d.Set("seclist", info.SecList) + d.Set("vcable", info.VCable) +} + +func resourceSecurityAssociationRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityAssociations() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of security association %s", name) + result, err := client.GetSecurityAssociation(name) + if err != nil { + // Security Association does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading security association %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of security association %s: %#v", name, result) + updateSecurityAssociationResourceData(d, result) + return nil +} + +func getSecurityAssociationResourceData(d *schema.ResourceData) (string, string) { + return d.Get("vcable").(string), d.Get("seclist").(string) +} + +func resourceSecurityAssociationDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityAssociations() + name := d.Get("name").(string) + + vcable, seclist := getSecurityAssociationResourceData(d) + log.Printf("[DEBUG] Deleting security association %s between vcable %s and security list %s", + name, vcable, seclist) + + if err := client.DeleteSecurityAssociation(name); err != nil { + return fmt.Errorf("Error deleting security association %s between vcable %s and security list %s: %s", + name, vcable, seclist, err) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_security_association_test.go b/builtin/providers/oracleopc/resource_security_association_test.go new file mode 100644 index 000000000..604ef64cb --- /dev/null +++ b/builtin/providers/oracleopc/resource_security_association_test.go @@ -0,0 +1,75 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccOPCResourceSecurityAssociation_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck( + associationResourceName, + testAccCheckAssociationDestroyed), + Steps: 
[]resource.TestStep{
+ {
+ Config: testAccSecurityAssociationBasic,
+ Check: resource.ComposeTestCheckFunc(
+ opcResourceCheck(
+ associationResourceName,
+ testAccCheckAssociationExists),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckAssociationExists(state *OPCResourceState) error {
+ associationName := getAssociationName(state)
+
+ if _, err := state.SecurityAssociations().GetSecurityAssociation(associationName); err != nil {
+ return fmt.Errorf("Error retrieving state of security association %s: %s", associationName, err)
+ }
+
+ return nil
+}
+
+func getAssociationName(rs *OPCResourceState) string {
+ return rs.Attributes["name"]
+}
+
+func testAccCheckAssociationDestroyed(state *OPCResourceState) error {
+ associationName := getAssociationName(state)
+ if info, err := state.SecurityAssociations().GetSecurityAssociation(associationName); err == nil {
+ return fmt.Errorf("Association %s still exists: %#v", associationName, info)
+ }
+
+ return nil
+}
+
+const associationName = "test_rule"
+
+var associationResourceName = fmt.Sprintf("opc_compute_security_association.%s", associationName)
+
+var testAccSecurityAssociationBasic = fmt.Sprintf(`
+resource "opc_compute_security_list" "sec-list1" {
+ name = "sec-list-1"
+ policy = "PERMIT"
+ outbound_cidr_policy = "DENY"
+}
+
+resource "opc_compute_security_association" "%s" {
+ vcable = "${opc_compute_instance.test-instance1.vcable}"
+ seclist = "${opc_compute_security_list.sec-list1.name}"
+}
+
+resource "opc_compute_instance" "test-instance1" {
+ name = "test"
+ label = "test"
+ shape = "oc3"
+ imageList = "/oracle/public/oel_6.4_2GB_v1"
+}
+`, associationName)
diff --git a/builtin/providers/oracleopc/resource_security_ip_list.go b/builtin/providers/oracleopc/resource_security_ip_list.go
new file mode 100644
index 000000000..6a3e66b28
--- /dev/null
+++ b/builtin/providers/oracleopc/resource_security_ip_list.go
@@ -0,0 +1,117 @@
+package opc
+
+import (
+ "fmt"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/oracle/terraform-provider-compute/sdk/compute"
+ "log"
+)
+
+func resourceSecurityIPList() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceSecurityIPListCreate,
+ Read: resourceSecurityIPListRead,
+ Update: resourceSecurityIPListUpdate,
+ Delete: resourceSecurityIPListDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "ip_entries": &schema.Schema{
+ Type: schema.TypeList,
+ Required: true,
+ ForceNew: false,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ },
+ }
+}
+
+func resourceSecurityIPListCreate(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource state: %#v", d.State())
+
+ name, ipEntries := getSecurityIPListResourceData(d)
+
+ log.Printf("[DEBUG] Creating security IP list with name %s, entries %s",
+ name, ipEntries)
+
+ client := meta.(*OPCClient).SecurityIPLists()
+ info, err := client.CreateSecurityIPList(name, ipEntries)
+ if err != nil {
+ return fmt.Errorf("Error creating security IP list %s: %s", name, err)
+ }
+
+ d.SetId(info.Name)
+ updateSecurityIPListResourceData(d, info)
+ return nil
+}
+
+func updateSecurityIPListResourceData(d *schema.ResourceData, info *compute.SecurityIPListInfo) {
+ d.Set("name", info.Name)
+ d.Set("ip_entries", info.SecIPEntries)
+}
+
+func resourceSecurityIPListRead(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource state: %#v", d.State())
+ client := meta.(*OPCClient).SecurityIPLists()
+ name := 
d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of security IP list %s", name) + result, err := client.GetSecurityIPList(name) + if err != nil { + // Security IP List does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading security IP list %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of security IP list %s: %#v", name, result) + updateSecurityIPListResourceData(d, result) + return nil +} + +func getSecurityIPListResourceData(d *schema.ResourceData) (string, []string) { + name := d.Get("name").(string) + ipEntries := d.Get("ip_entries").([]interface{}) + ipEntryStrings := []string{} + for _, entry := range ipEntries { + ipEntryStrings = append(ipEntryStrings, entry.(string)) + } + return name, ipEntryStrings +} + +func resourceSecurityIPListUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + client := meta.(*OPCClient).SecurityIPLists() + name, entries := getSecurityIPListResourceData(d) + + log.Printf("[DEBUG] Updating security IP list %s with ip entries %s", + name, entries) + + info, err := client.UpdateSecurityIPList(name, entries) + if err != nil { + return fmt.Errorf("Error updating security IP list %s: %s", name, err) + } + + updateSecurityIPListResourceData(d, info) + return nil +} + +func resourceSecurityIPListDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityIPLists() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting security IP list %s", name) + if err := client.DeleteSecurityIPList(name); err != nil { + return fmt.Errorf("Error deleting security IP list %s: %s", name, err) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_security_list.go b/builtin/providers/oracleopc/resource_security_list.go new file mode 100644 index 000000000..eea11bbb1 --- /dev/null +++ b/builtin/providers/oracleopc/resource_security_list.go @@ -0,0 +1,119 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceSecurityList() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityListCreate, + Read: resourceSecurityListRead, + Update: resourceSecurityListUpdate, + Delete: resourceSecurityListDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + + "outbound_cidr_policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + }, + } +} + +func resourceSecurityListCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + name, policy, outboundCIDRPolicy := getSecurityListResourceData(d) + + log.Printf("[DEBUG] Creating security list with name %s, policy %s, outbound CIDR policy %s", + name, policy, outboundCIDRPolicy) + + client := meta.(*OPCClient).SecurityLists() + info, err := client.CreateSecurityList(name, policy, outboundCIDRPolicy) + if err != nil { + return fmt.Errorf("Error creating security list %s: %s", name, err) + } + + d.SetId(info.Name) + updateSecurityListResourceData(d, info) + return nil +} + +func updateSecurityListResourceData(d *schema.ResourceData, info *compute.SecurityListInfo) { + d.Set("name", 
info.Name)
+ d.Set("policy", info.Policy)
+ d.Set("outbound_cidr_policy", info.OutboundCIDRPolicy)
+}
+
+func resourceSecurityListRead(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource state: %#v", d.State())
+ client := meta.(*OPCClient).SecurityLists()
+ name := d.Get("name").(string)
+
+ log.Printf("[DEBUG] Reading state of security list %s", name)
+ result, err := client.GetSecurityList(name)
+ if err != nil {
+ // Security List does not exist
+ if compute.WasNotFoundError(err) {
+ d.SetId("")
+ return nil
+ }
+ return fmt.Errorf("Error reading security list %s: %s", name, err)
+ }
+
+ log.Printf("[DEBUG] Read state of security list %s: %#v", name, result)
+ updateSecurityListResourceData(d, result)
+ return nil
+}
+
+func getSecurityListResourceData(d *schema.ResourceData) (string, string, string) {
+ return d.Get("name").(string),
+ d.Get("policy").(string),
+ d.Get("outbound_cidr_policy").(string)
+}
+
+func resourceSecurityListUpdate(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource state: %#v", d.State())
+
+ client := meta.(*OPCClient).SecurityLists()
+ name, policy, outboundCIDRPolicy := getSecurityListResourceData(d)
+
+ log.Printf("[DEBUG] Updating security list %s with policy %s, outbound_cidr_policy %s",
+ name, policy, outboundCIDRPolicy)
+
+ info, err := client.UpdateSecurityList(name, policy, outboundCIDRPolicy)
+ if err != nil {
+ return fmt.Errorf("Error updating security list %s: %s", name, err)
+ }
+
+ updateSecurityListResourceData(d, info)
+ return nil
+}
+
+func resourceSecurityListDelete(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource state: %#v", d.State())
+ client := meta.(*OPCClient).SecurityLists()
+ name := d.Get("name").(string)
+
+ log.Printf("[DEBUG] Deleting security list %s", name)
+ if err := client.DeleteSecurityList(name); err != nil {
+ return fmt.Errorf("Error deleting security list %s: %s", name, err)
+ }
+ return nil
+}
diff --git a/builtin/providers/oracleopc/resource_security_rule.go b/builtin/providers/oracleopc/resource_security_rule.go
new file mode 100644
index 000000000..0d9eb562c
--- /dev/null
+++ b/builtin/providers/oracleopc/resource_security_rule.go
@@ -0,0 +1,143 @@
+package opc
+
+import (
+ "fmt"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/oracle/terraform-provider-compute/sdk/compute"
+ "log"
+)
+
+func resourceSecurityRule() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceSecurityRuleCreate,
+ Read: resourceSecurityRuleRead,
+ Update: resourceSecurityRuleUpdate,
+ Delete: resourceSecurityRuleDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "source_list": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "destination_list": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "application": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "action": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: false,
+ },
+
+ "disabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Required: true,
+ ForceNew: false,
+ },
+ },
+ }
+}
+
+func resourceSecurityRuleCreate(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource state: %#v", d.State())
+
+ name, sourceList, destinationList, application, action, disabled := getSecurityRuleResourceData(d)
+
+ log.Printf("[DEBUG] 
Creating security rule with name %s, sourceList %s, destinationList %s, application %s, action %s, disabled %t",
+ name, sourceList, destinationList, application, action, disabled)
+
+ client := meta.(*OPCClient).SecurityRules()
+ info, err := client.CreateSecurityRule(name, sourceList, destinationList, application, action, disabled)
+ if err != nil {
+ return fmt.Errorf("Error creating security rule %s: %s", name, err)
+ }
+
+ d.SetId(info.Name)
+ updateSecurityRuleResourceData(d, info)
+ return nil
+}
+
+func updateSecurityRuleResourceData(d *schema.ResourceData, info *compute.SecurityRuleInfo) {
+ d.Set("name", info.Name)
+ d.Set("source_list", info.SourceList)
+ d.Set("destination_list", info.DestinationList)
+ d.Set("application", info.Application)
+ d.Set("action", info.Action)
+ d.Set("disabled", info.Disabled)
+}
+
+func resourceSecurityRuleRead(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource state: %#v", d.State())
+ client := meta.(*OPCClient).SecurityRules()
+ name := d.Get("name").(string)
+
+ log.Printf("[DEBUG] Reading state of security rule %s", name)
+ result, err := client.GetSecurityRule(name)
+ if err != nil {
+ // Security Rule does not exist
+ if compute.WasNotFoundError(err) {
+ d.SetId("")
+ return nil
+ }
+ return fmt.Errorf("Error reading security rule %s: %s", name, err)
+ }
+
+ log.Printf("[DEBUG] Read state of security rule %s: %#v", name, result)
+ updateSecurityRuleResourceData(d, result)
+ return nil
+}
+
+func getSecurityRuleResourceData(d *schema.ResourceData) (string, string, string, string, string, bool) {
+ return d.Get("name").(string),
+ d.Get("source_list").(string),
+ d.Get("destination_list").(string),
+ d.Get("application").(string),
+ d.Get("action").(string),
+ d.Get("disabled").(bool)
+}
+
+func resourceSecurityRuleUpdate(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource state: %#v", d.State())
+
+ client := meta.(*OPCClient).SecurityRules()
+ name, sourceList, destinationList, application, action, disabled := getSecurityRuleResourceData(d)
+
+ log.Printf("[DEBUG] Updating security rule %s with sourceList %s, destinationList %s, application %s, action %s, disabled %t",
+ name, sourceList, destinationList, application, action, disabled)
+
+ info, err := client.UpdateSecurityRule(name, sourceList, destinationList, application, action, disabled)
+ if err != nil {
+ return fmt.Errorf("Error updating security rule %s: %s", name, err)
+ }
+
+ updateSecurityRuleResourceData(d, info)
+ return nil
+}
+
+func resourceSecurityRuleDelete(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource state: %#v", d.State())
+ client := meta.(*OPCClient).SecurityRules()
+ name := d.Get("name").(string)
+
+ log.Printf("[DEBUG] Deleting security rule %s", name)
+ if err := client.DeleteSecurityRule(name); err != nil {
+ return fmt.Errorf("Error deleting security rule %s: %s", name, err)
+ }
+ return nil
+}
diff --git a/builtin/providers/oracleopc/resource_security_rule_test.go b/builtin/providers/oracleopc/resource_security_rule_test.go
new file mode 100644
index 000000000..f09c2b879
--- /dev/null
+++ b/builtin/providers/oracleopc/resource_security_rule_test.go
@@ -0,0 +1,85 @@
+package opc
+
+import (
+ "fmt"
+ "github.com/hashicorp/terraform/helper/resource"
+ "testing"
+)
+
+func TestAccOPCResourceSecurityRule_Basic(t *testing.T) {
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: opcResourceCheck(
+ 
ruleResourceName, + testAccCheckRuleDestroyed), + Steps: []resource.TestStep{ + { + Config: testAccSecurityRuleBasic, + Check: resource.ComposeTestCheckFunc( + opcResourceCheck( + ruleResourceName, + testAccCheckRuleExists), + ), + }, + }, + }) +} + +func testAccCheckRuleExists(state *OPCResourceState) error { + ruleName := getRuleName(state) + + if _, err := state.SecurityRules().GetSecurityRule(ruleName); err != nil { + return fmt.Errorf("Error retrieving state of security rule %s: %s", ruleName, err) + } + + return nil +} + +func getRuleName(rs *OPCResourceState) string { + return rs.Attributes["name"] +} + +func testAccCheckRuleDestroyed(state *OPCResourceState) error { + ruleName := getRuleName(state) + if info, err := state.SecurityRules().GetSecurityRule(ruleName); err == nil { + return fmt.Errorf("Rule %s still exists: %#v", ruleName, info) + } + + return nil +} + +const ruleName = "test_rule" +const secListName = "sec-list1" +const secIpListName = "sec-ip-list1" + +var ruleResourceName = fmt.Sprintf("opc_compute_security_rule.%s", ruleName) + +var testAccSecurityRuleBasic = fmt.Sprintf(` +resource "opc_compute_security_rule" "%s" { + name = "test" + source_list = "seclist:${opc_compute_security_list.sec-list1.name}" + destination_list = "seciplist:${opc_compute_security_ip_list.sec-ip-list1.name}" + action = "PERMIT" + application = "${opc_compute_security_application.spring-boot.name}" + disabled = false +} + +resource "opc_compute_security_list" "%s" { + name = "sec-list-1" + policy = "PERMIT" + outbound_cidr_policy = "DENY" +} + +resource "opc_compute_security_application" "spring-boot" { + name = "spring-boot" + protocol = "tcp" + dport = "8080" +} + +resource "opc_compute_security_ip_list" "%s" { + name = "sec-ip-list1" + ip_entries = ["217.138.34.4"] +} +`, ruleName, secListName, secIpListName) diff --git a/builtin/providers/oracleopc/resource_ssh_key.go b/builtin/providers/oracleopc/resource_ssh_key.go new file mode 100644 index 000000000..29f68b4aa --- /dev/null +++ b/builtin/providers/oracleopc/resource_ssh_key.go @@ -0,0 +1,117 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceSSHKey() *schema.Resource { + return &schema.Resource{ + Create: resourceSSHKeyCreate, + Read: resourceSSHKeyRead, + Update: resourceSSHKeyUpdate, + Delete: resourceSSHKeyDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Required: true, + ForceNew: false, + }, + }, + } +} + +func resourceSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d) + + client := meta.(*OPCClient).SSHKeys() + name := d.Get("name").(string) + key := d.Get("key").(string) + enabled := d.Get("enabled").(bool) + + log.Printf("[DEBUG] Creating ssh key with name %s, key %s, enabled %s", + name, key, enabled) + + info, err := client.CreateSSHKey(name, key, enabled) + if err != nil { + return fmt.Errorf("Error creating ssh key %s: %s", name, err) + } + + d.SetId(info.Name) + updateSSHKeyResourceData(d, info) + return nil +} + +func updateSSHKeyResourceData(d *schema.ResourceData, info *compute.SSHKeyInfo) { + d.Set("name", info.Name) + d.Set("key", info.Key) + d.Set("enabled", info.Enabled) +} + +func 
resourceSSHKeyRead(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource data: %#v", d)
+ client := meta.(*OPCClient).SSHKeys()
+ name := d.Get("name").(string)
+
+ log.Printf("[DEBUG] Reading state of ssh key %s", name)
+ result, err := client.GetSSHKey(name)
+ if err != nil {
+ // SSH Key does not exist
+ if compute.WasNotFoundError(err) {
+ d.SetId("")
+ return nil
+ }
+ return fmt.Errorf("Error reading ssh key %s: %s", name, err)
+ }
+
+ log.Printf("[DEBUG] Read state of ssh key %s: %#v", name, result)
+ updateSSHKeyResourceData(d, result)
+ return nil
+}
+
+func resourceSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource data: %#v", d)
+
+ client := meta.(*OPCClient).SSHKeys()
+ name := d.Get("name").(string)
+ key := d.Get("key").(string)
+ enabled := d.Get("enabled").(bool)
+
+ log.Printf("[DEBUG] Updating ssh key with name %s, key %s, enabled %t",
+ name, key, enabled)
+
+ info, err := client.UpdateSSHKey(name, key, enabled)
+ if err != nil {
+ return fmt.Errorf("Error updating ssh key %s: %s", name, err)
+ }
+
+ updateSSHKeyResourceData(d, info)
+ return nil
+}
+
+func resourceSSHKeyDelete(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Resource data: %#v", d)
+ client := meta.(*OPCClient).SSHKeys()
+ name := d.Get("name").(string)
+
+ log.Printf("[DEBUG] Deleting ssh key %s", name)
+ if err := client.DeleteSSHKey(name); err != nil {
+ return fmt.Errorf("Error deleting ssh key %s: %s", name, err)
+ }
+ return nil
+}
diff --git a/builtin/providers/oracleopc/resource_storage_volume.go b/builtin/providers/oracleopc/resource_storage_volume.go
new file mode 100644
index 000000000..2d80d09f2
--- /dev/null
+++ b/builtin/providers/oracleopc/resource_storage_volume.go
@@ -0,0 +1,301 @@
+package opc
+
+import (
+ "fmt"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/oracle/terraform-provider-compute/sdk/compute"
+ "log"
+)
+
+func resourceStorageVolume() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceStorageVolumeCreate,
+ Read: resourceStorageVolumeRead,
+ Update: resourceStorageVolumeUpdate,
+ Delete: resourceStorageVolumeDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "size": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "sizeInBytes": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: false,
+ },
+
+ "storage": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Default: "/oracle/public/storage/default",
+ },
+
+ "tags": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: false,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+
+ "bootableImage": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "bootableImageVersion": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ Default: -1,
+ },
+
+ "snapshot": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "account": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+
+ "snapshotId": &schema.Schema{
+ Type: 
schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceStorageVolumeCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d) + + sv := meta.(*OPCClient).StorageVolumes() + name := d.Get("name").(string) + properties := []string{d.Get("storage").(string)} + + spec := sv.NewStorageVolumeSpec( + d.Get("size").(string), + properties, + name) + + if d.Get("description").(string) != "" { + spec.SetDescription(d.Get("description").(string)) + } + + spec.SetTags(getTags(d)) + + if d.Get("bootableImage") != "" { + spec.SetBootableImage(d.Get("bootableImage").(string), d.Get("bootableImageVersion").(int)) + } + + if len(d.Get("snapshot").(*schema.Set).List()) > 0 { + snapshotDetails := d.Get("snapshot").(*schema.Set).List()[0].(map[string]interface{}) + spec.SetSnapshot( + snapshotDetails["name"].(string), + snapshotDetails["account"].(string), + ) + } + + if d.Get("snapshotId") != "" { + spec.SetSnapshotID(d.Get("snapshotId").(string)) + } + + log.Printf("[DEBUG] Creating storage volume %s with spec %#v", name, spec) + err := sv.CreateStorageVolume(spec) + if err != nil { + return fmt.Errorf("Error creating storage volume %s: %s", name, err) + } + + log.Printf("[DEBUG] Waiting for storage volume %s to come online", name) + info, err := sv.WaitForStorageVolumeOnline(name, meta.(*OPCClient).MaxRetryTimeout) + if err != nil { + return fmt.Errorf("Error waiting for storage volume %s to come online: %s", name, err) + } + + log.Printf("[DEBUG] Created storage volume %s: %#v", name, info) + + cachedAttachments, attachmentsFound := meta.(*OPCClient).storageAttachmentsByVolumeCache[name] + if attachmentsFound { + log.Printf("[DEBUG] Rebuilding storage attachments for volume %s", name) + for _, cachedAttachment := range cachedAttachments { + log.Printf("[DEBUG] Rebuilding storage attachments between volume %s and instance %s", + name, + cachedAttachment.instanceName) + + attachmentInfo, err := meta.(*OPCClient).StorageAttachments().CreateStorageAttachment( + cachedAttachment.index, + cachedAttachment.instanceName, + name, + ) + + if err != nil { + return fmt.Errorf( + "Error recreating storage attachment between volume %s and instance %s: %s", + name, + *cachedAttachment.instanceName, + err) + } + err = meta.(*OPCClient).StorageAttachments().WaitForStorageAttachmentCreated( + attachmentInfo.Name, + meta.(*OPCClient).MaxRetryTimeout) + if err != nil { + return fmt.Errorf( + "Error recreating storage attachment between volume %s and instance %s: %s", + name, + *cachedAttachment.instanceName, + err) + } + } + meta.(*OPCClient).storageAttachmentsByVolumeCache[name] = nil + } + + d.SetId(name) + updateResourceData(d, info) + return nil +} + +func getTags(d *schema.ResourceData) []string { + tags := []string{} + for _, i := range d.Get("tags").([]interface{}) { + tags = append(tags, i.(string)) + } + return tags +} + +func updateResourceData(d *schema.ResourceData, info *compute.StorageVolumeInfo) error { + d.Set("name", info.Name) + d.Set("description", info.Description) + d.Set("storage", info.Properties[0]) + d.Set("sizeInBytes", info.Size) + d.Set("tags", info.Tags) + d.Set("bootableImage", info.ImageList) + d.Set("bootableImageVersion", info.ImageListEntry) + if info.Snapshot != "" { + d.Set("snapshot", map[string]interface{}{ + "name": info.Snapshot, + "account": info.SnapshotAccount, + }) + } + d.Set("snapshotId", info.SnapshotID) + + return nil +} + +func resourceStorageVolumeRead(d *schema.ResourceData, meta interface{}) error 
{ + sv := meta.(*OPCClient).StorageVolumes() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of storage volume %s", name) + result, err := sv.GetStorageVolume(name) + if err != nil { + // Volume doesn't exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading storage volume %s: %s", name, err) + } + + if len(result.Result) == 0 { + // Volume doesn't exist + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Read state of storage volume %s: %#v", name, &result.Result[0]) + updateResourceData(d, &result.Result[0]) + + return nil +} + +func resourceStorageVolumeUpdate(d *schema.ResourceData, meta interface{}) error { + sv := meta.(*OPCClient).StorageVolumes() + name := d.Get("name").(string) + description := d.Get("description").(string) + size := d.Get("size").(string) + tags := getTags(d) + + log.Printf("[DEBUG] Updating storage volume %s with size %s, description %s, tags %#v", name, size, description, tags) + err := sv.UpdateStorageVolume(name, size, description, tags) + + if err != nil { + return fmt.Errorf("Error updating storage volume %s: %s", name, err) + } + + log.Printf("[DEBUG] Waiting for updated storage volume %s to come online", name) + info, err := sv.WaitForStorageVolumeOnline(name, meta.(*OPCClient).MaxRetryTimeout) + if err != nil { + return fmt.Errorf("Error waiting for updated storage volume %s to come online: %s", name, err) + } + + log.Printf("[DEBUG] Updated storage volume %s: %#v", name, info) + updateResourceData(d, info) + return nil +} + +func resourceStorageVolumeDelete(d *schema.ResourceData, meta interface{}) error { + sv := meta.(*OPCClient).StorageVolumes() + name := d.Get("name").(string) + + sva := meta.(*OPCClient).StorageAttachments() + attachments, err := sva.GetStorageAttachmentsForVolume(name) + if err != nil { + return fmt.Errorf("Error retrieving storage attachments for volume %s: %s", name, err) + } + + attachmentsToCache := make([]storageAttachment, len(*attachments)) + for index, attachment := range *attachments { + log.Printf("[DEBUG] Deleting storage attachment %s for volume %s", attachment.Name, name) + sva.DeleteStorageAttachment(attachment.Name) + sva.WaitForStorageAttachmentDeleted(attachment.Name, meta.(*OPCClient).MaxRetryTimeout) + attachmentsToCache[index] = storageAttachment{ + index: attachment.Index, + instanceName: compute.InstanceNameFromString(attachment.InstanceName), + } + } + meta.(*OPCClient).storageAttachmentsByVolumeCache[name] = attachmentsToCache + + log.Printf("[DEBUG] Deleting storage volume %s", name) + err = sv.DeleteStorageVolume(name) + if err != nil { + return fmt.Errorf("Error deleting storage volume %s: %s", name, err) + } + + log.Printf("[DEBUG] Waiting for storage volume %s to finish deleting", name) + err = sv.WaitForStorageVolumeDeleted(name, meta.(*OPCClient).MaxRetryTimeout) + if err != nil { + return fmt.Errorf("Error waiting for storage volume %s to finish deleting: %s", name, err) + } + + log.Printf("[DEBUG] Deleted storage volume %s", name) + return nil +} diff --git a/builtin/providers/oracleopc/resource_storage_volume_test.go b/builtin/providers/oracleopc/resource_storage_volume_test.go new file mode 100644 index 000000000..d168b5309 --- /dev/null +++ b/builtin/providers/oracleopc/resource_storage_volume_test.go @@ -0,0 +1,70 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccOPCStorageVolume_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + 
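+ // Acceptance test scaffolding: resource.Test runs only when TF_ACC is set,
+ // and PreCheck (testAccPreCheck) is expected to verify the OPC_* credential
+ // environment variables described in the provider docs before any API calls.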
PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck( + "opc_compute_storage_volume.test_volume", + testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccStorageVolumeBasic, + Check: resource.ComposeTestCheckFunc( + opcResourceCheck( + "opc_compute_storage_volume.test_volume", + testAccCheckStorageVolumeExists), + ), + }, + }, + }) +} + +func testAccCheckStorageVolumeExists(state *OPCResourceState) error { + sv := state.StorageVolumes() + volumeName := state.Attributes["name"] + + info, err := sv.GetStorageVolume(volumeName) + if err != nil { + return fmt.Errorf("Error retrieving state of volume %s: %s", volumeName, err) + } + + if len(info.Result) == 0 { + return fmt.Errorf("No info found for volume %s", volumeName) + } + + return nil +} + +func testAccCheckStorageVolumeDestroyed(state *OPCResourceState) error { + sv := state.StorageVolumes() + + volumeName := state.Attributes["name"] + + info, err := sv.GetStorageVolume(volumeName) + if err != nil { + return fmt.Errorf("Error retrieving state of volume %s: %s", volumeName, err) + } + + if len(info.Result) != 0 { + return fmt.Errorf("Volume %s still exists", volumeName) + } + + return nil +} + +const testAccStorageVolumeBasic = ` +resource "opc_compute_storage_volume" "test_volume" { + size = "3g" + description = "My volume" + name = "test_volume_b" + tags = ["foo", "bar", "baz"] +} +` From e7e815d2c64d4d595acc926f3663d68612cda630 Mon Sep 17 00:00:00 2001 From: Stephen Cross Date: Wed, 29 Mar 2017 16:37:14 +0000 Subject: [PATCH 008/342] Initial docs for Oracle Compute Cloud provider --- .../r/opc_compute_instance.html.markdown | 68 +++++++++++++++++++ .../opc_compute_ip_association.html.markdown | 31 +++++++++ .../opc_compute_ip_reservation.html.markdown | 33 +++++++++ ...compute_security_application.html.markdown | 39 +++++++++++ ...compute_security_association.html.markdown | 29 ++++++++ ...opc_compute_security_ip_list.html.markdown | 28 ++++++++ .../r/opc_compute_security_list.html.markdown | 33 +++++++++ .../r/opc_compute_security_rule.html.markdown | 46 +++++++++++++ .../r/opc_compute_ssh_key.html.markdown | 32 +++++++++ .../opc_compute_storage_volume.html.markdown | 49 +++++++++++++ 10 files changed, 388 insertions(+) create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown new file mode 100644 index 000000000..04762b5c3 
--- /dev/null
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown
@@ -0,0 +1,68 @@
+---
+layout: "oracle"
+page_title: "Oracle: opc_compute_instance"
+sidebar_current: "docs-opc-resource-instance"
+description: |-
+  Creates and manages an instance in an OPC identity domain.
+---
+
+# opc\_compute\_instance
+
+The ``opc_compute_instance`` resource creates and manages an instance in an OPC identity domain.
+
+~> **Caution:** The ``opc_compute_instance`` resource can completely delete your
+instance just as easily as it can create it. To avoid costly accidents,
+consider setting
+[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy)
+on your instance resources as an extra safety measure.
+
+## Example Usage
+
+```
+resource "opc_compute_instance" "test_instance" {
+ name = "test"
+ label = "test"
+ shape = "oc3"
+ imageList = "/oracle/public/oel_6.4_2GB_v1"
+ sshKeys = ["${opc_compute_ssh_key.key1.name}"]
+ attributes = "{\"foo\":\"bar\"}"
+ storage = [{
+ index = 1
+ volume = "${opc_compute_storage_volume.test_volume.name}"
+ },
+ {
+ index = 2
+ volume = "${opc_compute_storage_volume.test_volume2.name}"
+ }]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the instance. This need not be unique, as each instance is assigned a separate
+computed `opcId`.
+
+* `shape` - (Required) The shape of the instance, e.g. `oc4`.
+
+* `imageList` - (Optional) The image list from which to boot the instance, e.g. `/oracle/public/oel_6.4_2GB_v1`
+
+* `label` - (Optional) The label to apply to the instance.
+
+* `ip` - (Computed) The internal IP address assigned to the instance.
+
+* `opcId` - (Computed) The internal ID assigned to the instance.
+
+* `sshKeys` - (Optional) The names of the SSH Keys that can be used to log into the instance.
+
+* `attributes` - (Optional) An arbitrary JSON-formatted collection of attributes which is made available to the instance.
+
+* `vcable` - (Computed) The ID of the instance's VCable, which is used to associate it with reserved IP addresses and
+add it to Security Lists.
+
+* `storage` - (Optional) A set of zero or more storage volumes to attach to the instance. Each volume has two arguments:
+`index`, which is the volume's index in the instance's list of mounted volumes, and `volume`, which is the name of the
+storage volume to mount.
+
+* `bootOrder` - (Optional) The index number of the bootable storage volume that should be used to boot the instance, e.g. `[ 1 ]`. If you specify both `bootOrder` and `imageList`, the `imageList` attribute is ignored.
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
new file mode 100644
index 000000000..deeed76c4
--- /dev/null
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
@@ -0,0 +1,31 @@
+---
+layout: "oracle"
+page_title: "Oracle: opc_compute_ip_association"
+sidebar_current: "docs-opc-resource-ip-association"
+description: |-
+  Creates and manages an IP association in an OPC identity domain.
+---
+
+# opc\_compute\_ip\_association
+
+The ``opc_compute_ip_association`` resource creates and manages an association between an IP address and an instance in
+an OPC identity domain.
+ +## Example Usage + +``` +resource "opc_compute_ip_association" "instance1_reservation1" { + vcable = "${opc_compute_instance.test_instance.vcable}" + parentpool = "ipreservation:${opc_compute_ip_reservation.reservation1.name}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `vcable` - (Required) The vcable of the instance to associate the IP address with. + +* `parentpool` - (Required) The pool from which to take an IP address. To associate a specific reserved IP address, use +the prefix `ipreservation:` followed by the name of the IP reservation. To allocate an IP address from a pool, use the +prefix `ippool:`, e.g. `ippool:/oracle/public/ippool`. \ No newline at end of file diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown new file mode 100644 index 000000000..7c44c62ea --- /dev/null +++ b/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown @@ -0,0 +1,33 @@ +--- +layout: "oracle" +page_title: "Oracle: opc_compute_ip_reservation" +sidebar_current: "docs-opc-resource-ip-assocation" +description: |- + Creates and manages an IP reservation in an OPC identity domain. +--- + +# opc\_compute\_ip\_reservation + +The ``opc_compute_ip_reservation`` resource creates and manages an IP reservation in an OPC identity domain. + +## Example Usage + +``` +resource "opc_compute_ip_reservation" "reservation1" { + parentpool = "/oracle/public/ippool" + permanent = true + tags = [] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `parentpool` - (Required) The pool from which to allocate the IP address. + +* `permanent` - (Required) Whether the IP address remains reserved even when it is no longer associated with an instance +(if true), or may be returned to the pool and replaced with a different IP address when an instance is restarted, or +deleted and recreated (if false). + +* `tags` - (Optional) List of tags that may be applied to the IP reservation. \ No newline at end of file diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown new file mode 100644 index 000000000..fe8c9ba3c --- /dev/null +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown @@ -0,0 +1,39 @@ +--- +layout: "oracle" +page_title: "Oracle: opc_compute_security_application" +sidebar_current: "docs-opc-resource-security-application" +description: |- + Creates and manages a security application in an OPC identity domain. +--- + +# opc\_compute\_ip\_reservation + +The ``opc_compute_security_application`` resource creates and manages a security application in an OPC identity domain. + +## Example Usage + +``` +resource "opc_compute_security_application" "tomcat" { + name = "tomcat" + protocol = "tcp" + dport = "8080" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The unique (within the identity domain) name of the application + +* `protocol` - (Required) The protocol to enable for this application. 
Must be either one of +`tcp`, `udp`, `icmp`, `igmp`, `ipip`, `rdp`, `esp`, `ah`, `gre`, `icmpv6`, `ospf`, `pim`, `sctp`, `mplsip` or `all`, or +the corresponding integer in the range 0-254 from the list of [assigned protocol numbers](http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) + +* `dport` - (Required) The port, or range of ports, to enable for this application, e.g `8080`, `6000-7000`. + +* `icmptype` - (Optional) The ICMP type to enable for this application, if the `protocol` is `icmp`. Must be one of +`echo`, `reply`, `ttl`, `traceroute`, `unreachable`. + +* `icmpcode` - (Optional) The ICMP code to enable for this application, if the `protocol` is `icmp`. Must be one of +`network`, `host`, `protocol`, `port`, `df`, `admin`. \ No newline at end of file diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown new file mode 100644 index 000000000..170acc2ea --- /dev/null +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown @@ -0,0 +1,29 @@ +--- +layout: "oracle" +page_title: "Oracle: opc_compute_security_association" +sidebar_current: "docs-opc-resource-security-association" +description: |- + Creates and manages a security association in an OPC identity domain. +--- + +# opc\_compute\_ip\_reservation + +The ``opc_compute_security_association`` resource creates and manages an association between an instance and a security +list in an OPC identity domain. + +## Example Usage + +``` +resource "opc_compute_security_association" "test_instance_sec_list_1" { + vcable = "${opc_compute_instance.test_instance.vcable}" + seclist = "${opc_compute_security_list.sec_list1.name}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `vcable` - (Required) The `vcable` of the instance to associate to the security list. + +* `seclist` - (Required) The name of the security list to associate the instance to. \ No newline at end of file diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown new file mode 100644 index 000000000..bded4c30e --- /dev/null +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown @@ -0,0 +1,28 @@ +--- +layout: "oracle" +page_title: "Oracle: opc_compute_security_ip_list" +sidebar_current: "docs-opc-resource-security-list" +description: |- + Creates and manages a security IP list in an OPC identity domain. +--- + +# opc\_compute\_ip\_reservation + +The ``opc_compute_security_ip_list`` resource creates and manages a security IP list in an OPC identity domain. + +## Example Usage + +``` +resource "opc_compute_security_ip_list" "sec_ip_list1" { + name = "sec-ip-list1" + ip_entries = ["217.138.34.4"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The unique (within the identity domain) name of the security IP list. + +* `ip_entries` - (Required) The IP addresses to include in the list. 
\ No newline at end of file
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown
new file mode 100644
index 000000000..7da5e5668
--- /dev/null
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown
@@ -0,0 +1,33 @@
+---
+layout: "oracle"
+page_title: "Oracle: opc_compute_security_list"
+sidebar_current: "docs-opc-resource-security-list"
+description: |-
+  Creates and manages a security list in an OPC identity domain.
+---
+
+# opc\_compute\_ip\_reservation
+
+The ``opc_compute_security_list`` resource creates and manages a security list in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_security_list" "sec_list1" {
+ name = "sec-list-1"
+ policy = "permit"
+ outbound_cidr_policy = "deny"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The unique (within the identity domain) name of the security list.
+
+* `policy` - (Required) The policy to apply to instances associated with this list. Must be one of `permit`,
+`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent).
+
+* `outbound_cidr_policy` - (Required) The policy for outbound traffic from the security list. Must be one of `permit`,
+`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent).
\ No newline at end of file
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown
new file mode 100644
index 000000000..02c4b7533
--- /dev/null
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown
@@ -0,0 +1,46 @@
+---
+layout: "oracle"
+page_title: "Oracle: opc_compute_security_rule"
+sidebar_current: "docs-opc-resource-security-rule"
+description: |-
+  Creates and manages a security rule in an OPC identity domain.
+---
+
+# opc\_compute\_ip\_reservation
+
+The ``opc_compute_security_rule`` resource creates and manages a security rule in an OPC identity domain, which joins
+together a source security list (or security IP list), a destination security list (or security IP list), and a security
+application.
+
+## Example Usage
+
+```
+resource "opc_compute_security_rule" "test_rule" {
+ name = "test"
+ source_list = "seclist:${opc_compute_security_list.sec-list1.name}"
+ destination_list = "seciplist:${opc_compute_security_ip_list.sec-ip-list1.name}"
+ action = "permit"
+ application = "${opc_compute_security_application.spring-boot.name}"
+ disabled = false
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The unique (within the identity domain) name of the security rule.
+
+* `source_list` - (Required) The source security list (prefixed with `seclist:`), or security IP list (prefixed with
+`seciplist:`).
+
+* `destination_list` - (Required) The destination security list (prefixed with `seclist:`), or security IP list (prefixed with
+`seciplist:`).
+
+* `application` - (Required) The name of the application to which the rule applies.
+
+* `action` - (Required) Whether to `permit`, `refuse` or `deny` packets to which this rule applies. This will ordinarily
+be `permit`.
+
+* `disabled` - (Required) Whether to disable this security rule. 
This is useful if you want to temporarily disable a rule
+without removing it outright from your Terraform resource definition.
\ No newline at end of file
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown
new file mode 100644
index 000000000..9655653a9
--- /dev/null
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown
@@ -0,0 +1,32 @@
+---
+layout: "oracle"
+page_title: "Oracle: opc_compute_ssh_key"
+sidebar_current: "docs-opc-resource-instance"
+description: |-
+  Creates and manages an SSH key in an OPC identity domain.
+---
+
+# opc\_compute\_ssh_key
+
+The ``opc_compute_ssh_key`` resource creates and manages an SSH key in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_ssh_key" "test_key" {
+ name = "test-key"
+ key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIk..."
+ enabled = true
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The unique (within this identity domain) name of the SSH key.
+
+* `key` - (Required) The SSH public key itself.
+
+* `enabled` - (Required) Whether or not the key is enabled. This is useful if you want to temporarily disable an SSH key,
+without removing it entirely from your Terraform resource definition.
\ No newline at end of file
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown
new file mode 100644
index 000000000..0e91a8ad7
--- /dev/null
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown
@@ -0,0 +1,49 @@
+---
+layout: "oracle"
+page_title: "Oracle: opc_compute_storage_volume"
+sidebar_current: "docs-opc-resource-storage_volume"
+description: |-
+  Creates and manages a storage volume in an OPC identity domain.
+---
+
+# opc\_compute\_storage\_volume
+
+The ``opc_compute_storage_volume`` resource creates and manages a storage volume in an OPC identity domain.
+
+~> **Caution:** The ``opc_compute_storage_volume`` resource can completely delete your
+storage volume just as easily as it can create it. To avoid costly accidents,
+consider setting
+[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy)
+on your storage volume resources as an extra safety measure.
+
+## Example Usage
+
+```
+resource "opc_compute_storage_volume" "test_volume" {
+ size = "3g"
+ description = "My storage volume"
+ name = "test_volume_a"
+ tags = ["xyzzy", "quux"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The unique (within this identity domain) name of the storage volume.
+
+* `size` - (Required) The size of the storage volume.
+
+* `description` - (Optional) A description of the storage volume.
+
+* `tags` - (Optional) A list of tags to apply to the storage volume.
+
+* `bootableImage` - (Optional) The name of the bootable image the storage volume is loaded with.
+
+* `bootableImageVersion` - (Optional) The version of the bootable image specified in `bootableImage` to use.
+
+* `snapshot` - (Optional) The snapshot to initialise the storage volume with. This has two nested properties: `name`,
+for the name of the snapshot to use, and `account` for the name of the snapshot account to use.
+
+* `snapshotId` - (Optional) The ID of the snapshot to initialise the storage volume with.
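+
+As an illustrative sketch of how the boot-related arguments fit together (the exact
+image list and version here are assumptions; substitute ones available in your
+identity domain), a bootable volume can be created from a public image and then used
+to boot an instance via `bootOrder`:
+
+```
+resource "opc_compute_storage_volume" "boot_volume" {
+  name                 = "boot_volume_a"
+  size                 = "30g"
+  bootableImage        = "/oracle/public/oel_6.4_2GB_v1"
+  bootableImageVersion = 1
+}
+
+resource "opc_compute_instance" "booted_instance" {
+  name      = "booted-instance"
+  label     = "booted-instance"
+  shape     = "oc3"
+  bootOrder = [ 1 ]
+  storage = [{
+    index  = 1
+    volume = "${opc_compute_storage_volume.boot_volume.name}"
+  }]
+}
+```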
From 7aecce440883d00a3b936ae5f9403351fb58dc4a Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 29 Mar 2017 19:30:04 +0200 Subject: [PATCH 009/342] Hooking up the OracleOPC Docs --- website/source/assets/stylesheets/_docs.scss | 1 + .../d/opc_compute_vnic.html.markdown | 36 +++++++++++ .../providers/oracleopc/index.html.markdown | 55 +++++++++++++++++ .../r/opc_compute_instance.html.markdown | 4 +- .../opc_compute_ip_association.html.markdown | 6 +- .../opc_compute_ip_reservation.html.markdown | 6 +- ...compute_security_application.html.markdown | 8 +-- ...compute_security_association.html.markdown | 8 +-- ...opc_compute_security_ip_list.html.markdown | 8 +-- .../r/opc_compute_security_list.html.markdown | 8 +-- .../r/opc_compute_security_rule.html.markdown | 8 +-- .../r/opc_compute_ssh_key.html.markdown | 6 +- .../opc_compute_storage_volume.html.markdown | 4 +- website/source/layouts/docs.erb | 4 ++ website/source/layouts/oracleopc.erb | 59 +++++++++++++++++++ 15 files changed, 188 insertions(+), 33 deletions(-) create mode 100644 website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown create mode 100644 website/source/docs/providers/oracleopc/index.html.markdown create mode 100644 website/source/layouts/oracleopc.erb diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss index 35f16eb60..9f2922c21 100755 --- a/website/source/assets/stylesheets/_docs.scss +++ b/website/source/assets/stylesheets/_docs.scss @@ -50,6 +50,7 @@ body.layout-nomad, body.layout-ns1, body.layout-openstack, body.layout-opsgenie, +body.layout-oracleopc, body.layout-packet, body.layout-pagerduty, body.layout-postgresql, diff --git a/website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown b/website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown new file mode 100644 index 000000000..78be49c4a --- /dev/null +++ b/website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown @@ -0,0 +1,36 @@ +--- +layout: "oracleopc" +page_title: "Oracle: opc_compute_vnic" +sidebar_current: "docs-oracleopc-datasource-vnic" +description: |- + Gets information about the configuration of a Virtual NIC. +--- + +# opc\_compute\_vnic + +Use this data source to access the configuration of a Virtual NIC. + +## Example Usage + +``` +data "opc_compute_vnic" "current" {} + +output "mac_address" { + value = "${data.opc_compute_vnic.current.mac_address}" +} +``` + +## Argument Reference +* `name` is the name of the Virtual NIC. + +## Attributes Reference + +* `description` is a description of the Virtual NIC. + +* `mac_address` is the MAC Address of the Virtual NIC. + +* `tags` is a list of Tags associated with the Virtual NIC. + +* `transit_flag` is `true` if the Virtual NIC is of the type `transit`. + +* `uri` is the Unique Resource Locator of the Virtual NIC. diff --git a/website/source/docs/providers/oracleopc/index.html.markdown b/website/source/docs/providers/oracleopc/index.html.markdown new file mode 100644 index 000000000..598346919 --- /dev/null +++ b/website/source/docs/providers/oracleopc/index.html.markdown @@ -0,0 +1,55 @@ +--- +layout: "oracleopc" +page_title: "Provider: Oracle Public Cloud" +sidebar_current: "docs-oracleopc-index" +description: |- + The Oracle Public Cloud provider is used to interact with the many resources supported by the Oracle Public Cloud. The provider needs to be configured with credentials for the Oracle Public Cloud API. 
+---
+
+# Oracle Public Cloud Provider
+
+The Oracle Public Cloud provider is used to interact with the many resources supported by the Oracle Public Cloud. The provider needs to be configured with credentials for the Oracle Public Cloud API.
+
+Use the navigation to the left to read about the available resources.
+
+## Example Usage
+
+```
+# Configure the Oracle Public Cloud
+provider "oracle" {
+ user = "..."
+ password = "..."
+ identity_domain = "..."
+ endpoint = "..."
+}
+
+# Create an IP Reservation
+resource "opc_compute_ip_reservation" "production" {
+ parent_pool = "/oracle/public/ippool"
+ permanent = true
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `user` - (Optional) The username to use, generally your email address. It can also
+ be sourced from the `OPC_USERNAME` environment variable.
+
+* `password` - (Optional) The password associated with the username to use. It can also be sourced from
+ the `OPC_PASSWORD` environment variable.
+
+* `identity_domain` - (Optional) The identity domain to use. It can also be sourced from
+ the `OPC_IDENTITY_DOMAIN` environment variable.
+
+* `endpoint` - (Optional) The API endpoint to use, associated with your Oracle Public Cloud account. This is known as the `REST Endpoint` within the Oracle portal. It can also be sourced from the `OPC_ENDPOINT` environment variable.
+
+* `max_retry_timeout` - (Optional) The maximum number of seconds to wait for a successful response when operating on resources within Oracle Public Cloud. It can also be sourced from the `OPC_MAX_RETRY_TIMEOUT` environment variable. Defaults to 3000 seconds.
+
+## Testing
+
+Credentials must be provided via the `OPC_USERNAME`, `OPC_PASSWORD`,
+`OPC_IDENTITY_DOMAIN` and `OPC_ENDPOINT` environment variables in order to run
+acceptance tests.
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown
index 04762b5c3..faeb3ee7c 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracle"
+layout: "oracleopc"
 page_title: "Oracle: opc_compute_instance"
-sidebar_current: "docs-opc-resource-instance"
+sidebar_current: "docs-oracleopc-resource-instance"
 description: |-
   Creates and manages an instance in an OPC identity domain.
 ---
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
index deeed76c4..2518b2df1 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracle"
+layout: "oracleopc"
 page_title: "Oracle: opc_compute_ip_association"
-sidebar_current: "docs-opc-resource-ip-association"
+sidebar_current: "docs-oracleopc-resource-ip-association"
 description: |-
   Creates and manages an IP association in an OPC identity domain.
 ---
@@ -28,4 +28,4 @@ The following arguments are supported:
 
 * `parentpool` - (Required) The pool from which to take an IP address. To associate a specific reserved IP address, use
 the prefix `ipreservation:` followed by the name of the IP reservation. 
To allocate an IP address from a pool, use the -prefix `ippool:`, e.g. `ippool:/oracle/public/ippool`. \ No newline at end of file +prefix `ippool:`, e.g. `ippool:/oracle/public/ippool`. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown index 7c44c62ea..44b70cc0f 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown @@ -1,7 +1,7 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_ip_reservation" -sidebar_current: "docs-opc-resource-ip-assocation" +sidebar_current: "docs-oracleopc-resource-ip-reservation" description: |- Creates and manages an IP reservation in an OPC identity domain. --- @@ -30,4 +30,4 @@ The following arguments are supported: (if true), or may be returned to the pool and replaced with a different IP address when an instance is restarted, or deleted and recreated (if false). -* `tags` - (Optional) List of tags that may be applied to the IP reservation. \ No newline at end of file +* `tags` - (Optional) List of tags that may be applied to the IP reservation. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown index fe8c9ba3c..94760f082 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown @@ -1,12 +1,12 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_security_application" -sidebar_current: "docs-opc-resource-security-application" +sidebar_current: "docs-oracleopc-resource-security-application" description: |- Creates and manages a security application in an OPC identity domain. --- -# opc\_compute\_ip\_reservation +# opc\_compute\_security\_application The ``opc_compute_security_application`` resource creates and manages a security application in an OPC identity domain. @@ -36,4 +36,4 @@ the corresponding integer in the range 0-254 from the list of [assigned protocol `echo`, `reply`, `ttl`, `traceroute`, `unreachable`. * `icmpcode` - (Optional) The ICMP code to enable for this application, if the `protocol` is `icmp`. Must be one of -`network`, `host`, `protocol`, `port`, `df`, `admin`. \ No newline at end of file +`network`, `host`, `protocol`, `port`, `df`, `admin`. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown index 170acc2ea..49207c879 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown @@ -1,12 +1,12 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_security_association" -sidebar_current: "docs-opc-resource-security-association" +sidebar_current: "docs-oracleopc-resource-security-association" description: |- Creates and manages a security association in an OPC identity domain. 
 ---
 
-# opc\_compute\_ip\_reservation
+# opc\_compute\_security\_association
 
 The ``opc_compute_security_association`` resource creates and manages an association between an instance and a security list in an OPC identity domain.
@@ -26,4 +26,4 @@ The following arguments are supported:
 
 * `vcable` - (Required) The `vcable` of the instance to associate to the security list.
 
-* `seclist` - (Required) The name of the security list to associate the instance to.
\ No newline at end of file
+* `seclist` - (Required) The name of the security list to associate the instance to.
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown
index bded4c30e..62f40d839 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown
@@ -1,12 +1,12 @@
 ---
-layout: "oracle"
+layout: "oracleopc"
 page_title: "Oracle: opc_compute_security_ip_list"
-sidebar_current: "docs-opc-resource-security-list"
+sidebar_current: "docs-oracleopc-resource-security-ip-list"
 description: |-
   Creates and manages a security IP list in an OPC identity domain.
 ---
 
-# opc\_compute\_ip\_reservation
+# opc\_compute\_security\_ip\_list
 
 The ``opc_compute_security_ip_list`` resource creates and manages a security IP list in an OPC identity domain.
@@ -25,4 +25,4 @@ The following arguments are supported:
 
 * `name` - (Required) The unique (within the identity domain) name of the security IP list.
 
-* `ip_entries` - (Required) The IP addresses to include in the list.
\ No newline at end of file
+* `ip_entries` - (Required) The IP addresses to include in the list.
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown
index 7da5e5668..64547a41e 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown
@@ -1,12 +1,12 @@
 ---
-layout: "oracle"
+layout: "oracleopc"
 page_title: "Oracle: opc_compute_security_list"
-sidebar_current: "docs-opc-resource-security-list"
+sidebar_current: "docs-oracleopc-resource-security-list"
 description: |-
   Creates and manages a security list in an OPC identity domain.
 ---
 
-# opc\_compute\_ip\_reservation
+# opc\_compute\_security\_list
 
 The ``opc_compute_security_list`` resource creates and manages a security list in an OPC identity domain.
@@ -30,4 +30,4 @@ The following arguments are supported:
 `reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent).
 
 * `outbound_cidr_policy` - (Required) The policy for outbound traffic from the security list. Must be one of `permit`,
-`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent).
\ No newline at end of file
+`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent).
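A minimal configuration exercising the arguments documented above might look like the following sketch; the resource name and the upper-case policy values mirror the acceptance tests added later in this series and are illustrative rather than canonical:

```
resource "opc_compute_security_list" "sec-list1" {
  name                 = "sec-list-1"
  policy               = "PERMIT"
  outbound_cidr_policy = "DENY"
}
```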
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown index 02c4b7533..6497b0265 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown @@ -1,7 +1,7 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_security_rule" -sidebar_current: "docs-opc-resource-security-rule" +sidebar_current: "docs-oracleopc-resource-security-rule" description: |- Creates and manages a security rule in an OPC identity domain. --- @@ -33,7 +33,7 @@ The following arguments are supported: * `source_list` - (Required) The source security list (prefixed with `seclist:`), or security IP list (prefixed with `seciplist:`). - + * `destination_list` - (Required) The destination security list (prefixed with `seclist:`), or security IP list (prefixed with `seciplist:`). @@ -43,4 +43,4 @@ The following arguments are supported: be `permit`. * `disabled` - (Required) Whether to disable this security rule. This is useful if you want to temporarily disable a rule -without removing it outright from your Terraform resource definition. \ No newline at end of file +without removing it outright from your Terraform resource definition. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown index 9655653a9..ff85467d8 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown @@ -1,7 +1,7 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_ssh_key" -sidebar_current: "docs-opc-resource-instance" +sidebar_current: "docs-oracleopc-resource-ssh-key" description: |- Creates and manages an SSH key in an OPC identity domain. --- @@ -29,4 +29,4 @@ The following arguments are supported: * `key` - (Required) The SSH key itself * `enabled` - (Required) Whether or not the key is enabled. This is useful if you want to temporarily disable an SSH key, -without removing it entirely from your Terraform resource definition. \ No newline at end of file +without removing it entirely from your Terraform resource definition. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown index 0e91a8ad7..4b30b59ed 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown @@ -1,7 +1,7 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_storage_volume" -sidebar_current: "docs-opc-resource-storage_volume" +sidebar_current: "docs-oracleopc-resource-storage-volume" description: |- Creates and manages a storage volume in an OPC identity domain. 
--- diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 1f42c1e32..77d5bf2c3 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -355,6 +355,10 @@ OpsGenie + > + Oracle OPC + + > Packet diff --git a/website/source/layouts/oracleopc.erb b/website/source/layouts/oracleopc.erb new file mode 100644 index 000000000..a9d9579f8 --- /dev/null +++ b/website/source/layouts/oracleopc.erb @@ -0,0 +1,59 @@ +<% wrap_layout :inner do %> +<% content_for :sidebar do %> + +<% end %> + +<%= yield %> +<% end %> From 8d8d3a728afaf7ab2b290d119ed231e5f30cd4ba Mon Sep 17 00:00:00 2001 From: Justin DiPierro Date: Sat, 1 Apr 2017 13:01:53 -0400 Subject: [PATCH 010/342] Google Compute Address Importability --- .../google/import_compute_address_test.go | 28 +++++++++++++++++++ .../google/resource_compute_address.go | 7 ++++- 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 builtin/providers/google/import_compute_address_test.go diff --git a/builtin/providers/google/import_compute_address_test.go b/builtin/providers/google/import_compute_address_test.go new file mode 100644 index 000000000..db579f4c0 --- /dev/null +++ b/builtin/providers/google/import_compute_address_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeAddress_importBasic(t *testing.T) { + resourceName := "google_compute_address.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeAddressDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeAddress_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/google/resource_compute_address.go b/builtin/providers/google/resource_compute_address.go index d4c962230..54a60cc0a 100644 --- a/builtin/providers/google/resource_compute_address.go +++ b/builtin/providers/google/resource_compute_address.go @@ -14,7 +14,12 @@ func resourceComputeAddress() *schema.Resource { Create: resourceComputeAddressCreate, Read: resourceComputeAddressRead, Delete: resourceComputeAddressDelete, - + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + return []*schema.ResourceData{d}, nil + }, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, From 8092d90f25961ace8b315cb9861acd7511960cbd Mon Sep 17 00:00:00 2001 From: Justin DiPierro Date: Sat, 1 Apr 2017 13:25:34 -0400 Subject: [PATCH 011/342] Importability for Google Compute Global Address --- .../import_compute_global_address_test.go | 28 +++++++++++++++++++ .../google/resource_compute_global_address.go | 7 ++++- 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 builtin/providers/google/import_compute_global_address_test.go diff --git a/builtin/providers/google/import_compute_global_address_test.go b/builtin/providers/google/import_compute_global_address_test.go new file mode 100644 index 000000000..73e495644 --- /dev/null +++ b/builtin/providers/google/import_compute_global_address_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeGlobalAddress_importBasic(t *testing.T) { + resourceName := 
"google_compute_global_address.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeGlobalAddressDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeGlobalAddress_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/google/resource_compute_global_address.go b/builtin/providers/google/resource_compute_global_address.go index e335e527a..7f4df04a4 100644 --- a/builtin/providers/google/resource_compute_global_address.go +++ b/builtin/providers/google/resource_compute_global_address.go @@ -14,7 +14,12 @@ func resourceComputeGlobalAddress() *schema.Resource { Create: resourceComputeGlobalAddressCreate, Read: resourceComputeGlobalAddressRead, Delete: resourceComputeGlobalAddressDelete, - + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + return []*schema.ResourceData{d}, nil + }, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, From 59d1d82d706dd59aff9453e124fb098e9bd7666c Mon Sep 17 00:00:00 2001 From: Justin DiPierro Date: Sat, 1 Apr 2017 13:45:41 -0400 Subject: [PATCH 012/342] Update docs for newly importable resources --- website/source/docs/import/importability.html.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/source/docs/import/importability.html.md b/website/source/docs/import/importability.html.md index 933c47a1d..8fbdb4878 100644 --- a/website/source/docs/import/importability.html.md +++ b/website/source/docs/import/importability.html.md @@ -141,9 +141,11 @@ To make a resource importable, please see the ### Google +* google_compute_address * google_compute_autoscaler * google_compute_firewall * google_compute_forwarding_rule +* google_compute_global_address * google_compute_http_health_check * google_compute_instance_group_manager * google_compute_instance_template From f15b74b4a4e22800be8cf3471455bca652cbf64f Mon Sep 17 00:00:00 2001 From: Joshua Spence Date: Mon, 3 Apr 2017 09:56:25 +1000 Subject: [PATCH 013/342] Fix DB parameter group name property It appears that #13232 doesn't work properly if you actually try to set any `parameter`s. Specifically, I was getting the following error: ``` * aws_db_parameter_group.test: 1 error(s) occurred: * aws_db_parameter_group.test: Error modifying DB Parameter Group: InvalidParameterValue: The parameter DBParameterGroupName must be provided and must not be blank. 
status code: 400, request id: 5783e396-17ff-11e7-87d5-e3fd4c7025ce ``` --- .../providers/aws/resource_aws_db_parameter_group.go | 1 + .../aws/resource_aws_db_parameter_group_test.go | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/builtin/providers/aws/resource_aws_db_parameter_group.go b/builtin/providers/aws/resource_aws_db_parameter_group.go index d5e943fd6..4e2611ff7 100644 --- a/builtin/providers/aws/resource_aws_db_parameter_group.go +++ b/builtin/providers/aws/resource_aws_db_parameter_group.go @@ -98,6 +98,7 @@ func resourceAwsDbParameterGroupCreate(d *schema.ResourceData, meta interface{}) } else { groupName = resource.UniqueId() } + d.Set("name", groupName) createOpts := rds.CreateDBParameterGroupInput{ DBParameterGroupName: aws.String(groupName), diff --git a/builtin/providers/aws/resource_aws_db_parameter_group_test.go b/builtin/providers/aws/resource_aws_db_parameter_group_test.go index b8e4e56c4..1d330bfc7 100644 --- a/builtin/providers/aws/resource_aws_db_parameter_group_test.go +++ b/builtin/providers/aws/resource_aws_db_parameter_group_test.go @@ -715,11 +715,21 @@ const testAccDBParameterGroupConfig_namePrefix = ` resource "aws_db_parameter_group" "test" { name_prefix = "tf-test-" family = "mysql5.6" + + parameter { + name = "sync_binlog" + value = 0 + } } ` const testAccDBParameterGroupConfig_generatedName = ` resource "aws_db_parameter_group" "test" { family = "mysql5.6" + + parameter { + name = "sync_binlog" + value = 0 + } } ` From 403801cf6836d2b4897a2d6198705d88e1c06d08 Mon Sep 17 00:00:00 2001 From: Stephen Cross Date: Wed, 29 Mar 2017 16:30:08 +0000 Subject: [PATCH 014/342] Initial Oracle Compute Cloud provider --- builtin/providers/oracleopc/config.go | 47 +++ builtin/providers/oracleopc/provider.go | 75 +++++ builtin/providers/oracleopc/provider_test.go | 61 ++++ .../providers/oracleopc/resource_instance.go | 306 ++++++++++++++++++ .../oracleopc/resource_instance_test.go | 156 +++++++++ .../oracleopc/resource_ip_association.go | 103 ++++++ .../oracleopc/resource_ip_association_test.go | 74 +++++ .../oracleopc/resource_ip_reservation.go | 122 +++++++ .../resource_security_application.go | 124 +++++++ .../resource_security_association.go | 103 ++++++ .../resource_security_association_test.go | 75 +++++ .../oracleopc/resource_security_ip_list.go | 117 +++++++ .../oracleopc/resource_security_list.go | 119 +++++++ .../oracleopc/resource_security_rule.go | 143 ++++++++ .../oracleopc/resource_security_rule_test.go | 85 +++++ .../providers/oracleopc/resource_ssh_key.go | 117 +++++++ .../oracleopc/resource_storage_volume.go | 301 +++++++++++++++++ .../oracleopc/resource_storage_volume_test.go | 70 ++++ 18 files changed, 2198 insertions(+) create mode 100644 builtin/providers/oracleopc/config.go create mode 100644 builtin/providers/oracleopc/provider.go create mode 100644 builtin/providers/oracleopc/provider_test.go create mode 100644 builtin/providers/oracleopc/resource_instance.go create mode 100644 builtin/providers/oracleopc/resource_instance_test.go create mode 100644 builtin/providers/oracleopc/resource_ip_association.go create mode 100644 builtin/providers/oracleopc/resource_ip_association_test.go create mode 100644 builtin/providers/oracleopc/resource_ip_reservation.go create mode 100644 builtin/providers/oracleopc/resource_security_application.go create mode 100644 builtin/providers/oracleopc/resource_security_association.go create mode 100644 builtin/providers/oracleopc/resource_security_association_test.go create mode 100644 
builtin/providers/oracleopc/resource_security_ip_list.go create mode 100644 builtin/providers/oracleopc/resource_security_list.go create mode 100644 builtin/providers/oracleopc/resource_security_rule.go create mode 100644 builtin/providers/oracleopc/resource_security_rule_test.go create mode 100644 builtin/providers/oracleopc/resource_ssh_key.go create mode 100644 builtin/providers/oracleopc/resource_storage_volume.go create mode 100644 builtin/providers/oracleopc/resource_storage_volume_test.go diff --git a/builtin/providers/oracleopc/config.go b/builtin/providers/oracleopc/config.go new file mode 100644 index 000000000..fbae3b5d5 --- /dev/null +++ b/builtin/providers/oracleopc/config.go @@ -0,0 +1,47 @@ +package opc + +import ( + "fmt" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "net/url" +) + +type Config struct { + User string + Password string + IdentityDomain string + Endpoint string + MaxRetryTimeout int +} + +type storageAttachment struct { + index int + instanceName *compute.InstanceName +} + +type OPCClient struct { + *compute.AuthenticatedClient + MaxRetryTimeout int + storageAttachmentsByVolumeCache map[string][]storageAttachment +} + +func (c *Config) Client() (*OPCClient, error) { + u, err := url.ParseRequestURI(c.Endpoint) + if err != nil { + return nil, fmt.Errorf("Invalid endpoint URI: %s", err) + } + + client := compute.NewComputeClient(c.IdentityDomain, c.User, c.Password, u) + authenticatedClient, err := client.Authenticate() + if err != nil { + return nil, fmt.Errorf("Authentication failed: %s", err) + } + + opcClient := &OPCClient{ + AuthenticatedClient: authenticatedClient, + MaxRetryTimeout: c.MaxRetryTimeout, + storageAttachmentsByVolumeCache: make(map[string][]storageAttachment), + } + + return opcClient, nil +} diff --git a/builtin/providers/oracleopc/provider.go b/builtin/providers/oracleopc/provider.go new file mode 100644 index 000000000..a6d0d3fb5 --- /dev/null +++ b/builtin/providers/oracleopc/provider.go @@ -0,0 +1,75 @@ +package opc + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns a terraform.ResourceProvider. 
+func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "user": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_USERNAME", nil), + Description: "The user name for OPC API operations.", + }, + + "password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_PASSWORD", nil), + Description: "The user password for OPC API operations.", + }, + + "identityDomain": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_IDENTITY_DOMAIN", nil), + Description: "The OPC identity domain for API operations", + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_ENDPOINT", nil), + Description: "The HTTP endpoint for OPC API operations.", + }, + + "maxRetryTimeout": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_MAX_RETRY_TIMEOUT", 3000), + Description: "Max num seconds to wait for successful response when operating on resources within OPC (defaults to 3000)", + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "opc_compute_storage_volume": resourceStorageVolume(), + "opc_compute_instance": resourceInstance(), + "opc_compute_ssh_key": resourceSSHKey(), + "opc_compute_security_application": resourceSecurityApplication(), + "opc_compute_security_list": resourceSecurityList(), + "opc_compute_security_ip_list": resourceSecurityIPList(), + "opc_compute_ip_reservation": resourceIPReservation(), + "opc_compute_ip_association": resourceIPAssociation(), + "opc_compute_security_rule": resourceSecurityRule(), + "opc_compute_security_association": resourceSecurityAssociation(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + User: d.Get("user").(string), + Password: d.Get("password").(string), + IdentityDomain: d.Get("identityDomain").(string), + Endpoint: d.Get("endpoint").(string), + MaxRetryTimeout: d.Get("maxRetryTimeout").(int), + } + + return config.Client() +} diff --git a/builtin/providers/oracleopc/provider_test.go b/builtin/providers/oracleopc/provider_test.go new file mode 100644 index 000000000..c60076b06 --- /dev/null +++ b/builtin/providers/oracleopc/provider_test.go @@ -0,0 +1,61 @@ +package opc + +import ( + "os" + "testing" + + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "opc": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + required := []string{"OPC_USERNAME", "OPC_PASSWORD", "OPC_IDENTITY_DOMAIN", "OPC_ENDPOINT"} + for _, prop := range required { + if os.Getenv(prop) == "" { + t.Fatalf("%s must be set for acceptance test", prop) + } + } +} + +type OPCResourceState struct { + *OPCClient + *terraform.InstanceState +} + +func opcResourceCheck(resourceName string, f func(checker 
*OPCResourceState) error) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Resource not found: %s", resourceName) + } + + state := &OPCResourceState{ + OPCClient: testAccProvider.Meta().(*OPCClient), + InstanceState: rs.Primary, + } + + return f(state) + } +} diff --git a/builtin/providers/oracleopc/resource_instance.go b/builtin/providers/oracleopc/resource_instance.go new file mode 100644 index 000000000..70f3b99c8 --- /dev/null +++ b/builtin/providers/oracleopc/resource_instance.go @@ -0,0 +1,306 @@ +package opc + +import ( + "encoding/json" + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceInstanceCreate, + Read: resourceInstanceRead, + Delete: resourceInstanceDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "shape": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "imageList": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "label": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "ip": { + Type: schema.TypeString, + Optional: false, + Computed: true, + }, + + "opcId": { + Type: schema.TypeString, + Optional: false, + Computed: true, + }, + + "sshKeys": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "attributes": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "vcable": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "storage": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "index": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "volume": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "bootOrder": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + }, + } +} + +func getAttrs(d *schema.ResourceData) (*map[string]interface{}, error) { + var attrs map[string]interface{} + + attrString := d.Get("attributes").(string) + if attrString == "" { + return &attrs, nil + } + if err := json.Unmarshal([]byte(attrString), &attrs); err != nil { + return &attrs, fmt.Errorf("Cannot parse '%s' as json", attrString) + } + return &attrs, nil +} + +func resourceInstanceCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d.State()) + + client := meta.(*OPCClient).Instances() + name := d.Get("name").(string) + shape := d.Get("shape").(string) + imageList := d.Get("imageList").(string) + label := d.Get("label").(string) + storage := getStorageAttachments(d) + sshKeys := getSSHKeys(d) + bootOrder := getBootOrder(d) + + attrs, err := getAttrs(d) + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating instance with name %s, shape %s, imageList %s, storage %s, bootOrder %s, label %s, sshKeys %s, attrs %#v", + name, shape, imageList, storage, bootOrder, label, sshKeys, attrs) + + id, err := client.LaunchInstance(name, label, shape, imageList, storage, bootOrder, sshKeys, *attrs) + if err != nil { + return fmt.Errorf("Error creating instance %s: 
%s", name, err) + } + + log.Printf("[DEBUG] Waiting for instance %s to come online", id.String()) + info, err := client.WaitForInstanceRunning(id, meta.(*OPCClient).MaxRetryTimeout) + if err != nil { + return fmt.Errorf("Error waiting for instance %s to come online: %s", id, err) + } + + log.Printf("[DEBUG] Created instance %s: %#v", id, info) + + attachStorage( + &compute.InstanceName{ + Name: info.Name, + ID: info.ID, + }, + d, meta) + + d.SetId(info.Name) + updateInstanceResourceData(d, info) + return nil +} + +func attachStorage(name *compute.InstanceName, d *schema.ResourceData, meta interface{}) error { + storageClient := meta.(*OPCClient).StorageAttachments() + storage := d.Get("storage").(*schema.Set) + updatedStorage := schema.NewSet(storage.F, []interface{}{}) + + for _, i := range storage.List() { + attrs := i.(map[string]interface{}) + attachmentInfo, err := storageClient.CreateStorageAttachment( + attrs["index"].(int), + name, + attrs["volume"].(string)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Waiting for storage attachment %#v to come online", attachmentInfo) + storageClient.WaitForStorageAttachmentCreated(attachmentInfo.Name, meta.(*OPCClient).MaxRetryTimeout) + log.Printf("[DEBUG] Storage attachment %s: %s-%s created", + attachmentInfo.Name, attachmentInfo.InstanceName, attachmentInfo.StorageVolumeName) + attrs["name"] = attachmentInfo.Name + updatedStorage.Add(attrs) + } + + d.Set("storage", updatedStorage) + return nil +} + +func getSSHKeys(d *schema.ResourceData) []string { + sshKeys := []string{} + for _, i := range d.Get("sshKeys").([]interface{}) { + sshKeys = append(sshKeys, i.(string)) + } + return sshKeys +} + +func getBootOrder(d *schema.ResourceData) []int { + bootOrder := []int{} + for _, i := range d.Get("bootOrder").([]interface{}) { + bootOrder = append(bootOrder, i.(int)) + } + return bootOrder +} + +func getStorageAttachments(d *schema.ResourceData) []compute.LaunchPlanStorageAttachmentSpec { + storageAttachments := []compute.LaunchPlanStorageAttachmentSpec{} + storage := d.Get("storage").(*schema.Set) + for _, i := range storage.List() { + attrs := i.(map[string]interface{}) + storageAttachments = append(storageAttachments, compute.LaunchPlanStorageAttachmentSpec{ + Index: attrs["index"].(int), + Volume: attrs["volume"].(string), + }) + } + return storageAttachments +} + +func updateInstanceResourceData(d *schema.ResourceData, info *compute.InstanceInfo) error { + d.Set("name", info.Name) + d.Set("opcId", info.ID) + d.Set("imageList", info.ImageList) + d.Set("bootOrder", info.BootOrder) + d.Set("sshKeys", info.SSHKeys) + d.Set("label", info.Label) + d.Set("ip", info.IPAddress) + d.Set("vcable", info.VCableID) + + return nil +} + +func resourceInstanceRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d.State()) + client := meta.(*OPCClient).Instances() + name := d.Get("name").(string) + instanceName := &compute.InstanceName{ + Name: name, + ID: d.Get("opcId").(string), + } + + log.Printf("[DEBUG] Reading state of instance %s", instanceName) + result, err := client.GetInstance(instanceName) + if err != nil { + // Instance doesn't exist + if compute.WasNotFoundError(err) { + log.Printf("[DEBUG] Instance %s not found", instanceName) + d.SetId("") + return nil + } + return fmt.Errorf("Error reading instance %s: %s", instanceName, err) + } + + log.Printf("[DEBUG] Read state of instance %s: %#v", instanceName, result) + + attachments, err := 
meta.(*OPCClient).StorageAttachments().GetStorageAttachmentsForInstance(instanceName) + if err != nil { + return fmt.Errorf("Error reading storage attachments for instance %s: %s", instanceName, err) + } + updateInstanceResourceData(d, result) + updateAttachmentResourceData(d, attachments) + return nil +} + +func updateAttachmentResourceData(d *schema.ResourceData, attachments *[]compute.StorageAttachmentInfo) { + attachmentSet := schema.NewSet(d.Get("storage").(*schema.Set).F, []interface{}{}) + for _, attachment := range *attachments { + properties := map[string]interface{}{ + "index": attachment.Index, + "volume": attachment.StorageVolumeName, + "name": attachment.Name, + } + attachmentSet.Add(properties) + } + d.Set("storage", attachmentSet) +} + +func resourceInstanceDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d.State()) + client := meta.(*OPCClient).Instances() + name := d.Get("name").(string) + + instanceName := &compute.InstanceName{ + Name: name, + ID: d.Get("opcId").(string), + } + + log.Printf("[DEBUG] Deleting instance %s", instanceName) + if err := client.DeleteInstance(instanceName); err != nil { + return fmt.Errorf("Error deleting instance %s: %s", instanceName, err) + } + if err := client.WaitForInstanceDeleted(instanceName, meta.(*OPCClient).MaxRetryTimeout); err != nil { + return fmt.Errorf("Error deleting instance %s: %s", instanceName, err) + } + + for _, attachment := range d.Get("storage").(*schema.Set).List() { + name := attachment.(map[string]interface{})["name"].(string) + log.Printf("[DEBUG] Deleting storage attachment %s", name) + client.StorageAttachments().DeleteStorageAttachment(name) + client.StorageAttachments().WaitForStorageAttachmentDeleted(name, meta.(*OPCClient).MaxRetryTimeout) + } + + return nil +} diff --git a/builtin/providers/oracleopc/resource_instance_test.go b/builtin/providers/oracleopc/resource_instance_test.go new file mode 100644 index 000000000..6f386af84 --- /dev/null +++ b/builtin/providers/oracleopc/resource_instance_test.go @@ -0,0 +1,156 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "testing" +) + +func TestAccOPCInstance_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck( + instanceResourceName, + testAccCheckInstanceDestroyed), + Steps: []resource.TestStep{ + { + Config: testAccInstanceBasic, + Check: resource.ComposeTestCheckFunc( + opcResourceCheck( + instanceResourceName, + testAccCheckInstanceExists), + opcResourceCheck( + keyResourceName, + testAccCheckSSHKeyExists), + ), + }, + { + Config: modifySSHKey, + Check: resource.ComposeTestCheckFunc( + opcResourceCheck( + instanceResourceName, + testAccCheckInstanceExists), + opcResourceCheck( + keyResourceName, + testAccCheckSSHKeyUpdated), + ), + }, + }, + }) +} + +func testAccCheckInstanceExists(state *OPCResourceState) error { + instanceName := getInstanceName(state) + + if _, err := state.Instances().GetInstance(instanceName); err != nil { + return fmt.Errorf("Error retrieving state of instance %s: %s", instanceName, err) + } + + return nil +} + +func testAccCheckSSHKeyExists(state *OPCResourceState) error { + keyName := state.Attributes["name"] + + if _, err := state.SSHKeys().GetSSHKey(keyName); err != nil { + return fmt.Errorf("Error retrieving state of key %s: %s", keyName, err) + } + + return nil +} + 
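+
+// testAccCheckSSHKeyUpdated re-reads the key by name after the second test
+// step has applied and verifies the stored public key now equals updatedKey,
+// which is how this test asserts that the in-place SSH key update took effect.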
+func testAccCheckSSHKeyUpdated(state *OPCResourceState) error { + keyName := state.Attributes["name"] + info, err := state.SSHKeys().GetSSHKey(keyName) + if err != nil { + return err + } + if info.Key != updatedKey { + return fmt.Errorf("Expected key\n\t%s\nbut was\n\t%s", updatedKey, info.Key) + } + return nil +} + +func getInstanceName(rs *OPCResourceState) *compute.InstanceName { + return &compute.InstanceName{ + Name: rs.Attributes["name"], + ID: rs.Attributes["opcId"], + } +} + +func testAccCheckInstanceDestroyed(state *OPCResourceState) error { + instanceName := getInstanceName(state) + if info, err := state.Instances().GetInstance(instanceName); err == nil { + return fmt.Errorf("Instance %s still exists: %#v", instanceName, info) + } + + return nil +} + +const instanceName = "test_instance" +const keyName = "test_key" + +var instanceResourceName = fmt.Sprintf("opc_compute_instance.%s", instanceName) +var keyResourceName = fmt.Sprintf("opc_compute_ssh_key.%s", keyName) + +const originalKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIkZEr5UcMojtxhk6Zum39NOihHNXEvRWDt5WssX8TH/ghpv3D25K1pJkf+wfAi17HwEmYwPMEyEHENS443v6RZbXvzCkUWzkJzq7Zvbdqld038km31La2QUoMMp1KL5zk1nM65xCeQDVcR/h++03EScB2CuzTpAV6khMdfgOJgxm361kfrDVRwc1HQrAOpOnzkpPfwqBrYWqN1UnKvuO77Wk8z5LBe03EPNru3bLE3s3qHI9hjO0gXMiVUi0KyNxdWfDO8esqQlKavHAeePyrRA55YF8kBB5dEl4tVNOqpY/8TRnGN1mOe0LWxa8Ytz1wbyS49knsNVTel" +const updatedKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDHvb/2OSemgzUYLNW1/T3u33r7sZy1qbWtgVWiREH4gS5TVmDVPuvN1MFLdNqiWQA53gK8Gp24jtjNm9ftcPhicv81HVWJTB69C0sJGEfF0l4mgbemJLH3i37Mb6SdWJcGof9qHVDADPgiC8jIBVUhdiJSeq4fUJ3NQA2eUExBkRglQWairkNzPNA0mi3GL9KDGnoBnSCAXNGoKgDgIOqW0dYFP6oHyGWkF7V+/TME9aIQvmMpHjVzl7brZ/wED2t5vTJxxbgogHEmWnfs7p8EP5IsN6Vnjd0VNIt1tu3TduS8kH5npkPqZz8oIP93Ypxn0l7ZNEl9MahbhPj3gJ1YY7Cygrlt1VLC1ibBbOgIS2Lj6vGG/Yjkqs3Vw6qrmTRlsJ9c6bZO2xq0xzV11XQHvjPegBOClF6AztEe1jKU/RUFnzjIF8lUmM63fTaXuVkNERkTSE3E9XL3Uq6eqYdef7wHFFhCMSGotp3ANAb30kflysA9ID0b3o5QU2tB8OBxBicXQy11lh+u204YJuvIzeTXo+JAad5TWFlJcsUlbPFppLQdhUpoWaJouBGJV36DJb9R34i9T8Ze5tnJUQgPmMkERyPvb/+v5j3s2hs1A9WO6/MqmZd70gudsX/1bqWT898vCCOdM+CspNVY7nHVUtde7C6BrHzphr/C1YBXHw==" + +var testAccInstanceBasic = fmt.Sprintf(` +resource "opc_compute_instance" "%s" { + name = "test" + label = "test" + shape = "oc3" + imageList = "/oracle/public/oel_6.4_2GB_v1" + sshKeys = ["${opc_compute_ssh_key.test_key.name}"] + attributes = "{\"foo\": \"bar\"}" + storage = { + index = 1 + volume = "${opc_compute_storage_volume.test_volume.name}" + } +} + +resource "opc_compute_storage_volume" "test_volume" { + size = "3g" + description = "My volume" + name = "test_volume_b" + tags = ["foo", "bar", "baz"] +} + +resource "opc_compute_ssh_key" "%s" { + name = "test-key" + key = "%s" + enabled = true +} +`, instanceName, keyName, originalKey) + +var modifySSHKey = fmt.Sprintf(` +resource "opc_compute_instance" "%s" { + name = "test" + label = "test" + shape = "oc3" + imageList = "/oracle/public/oel_6.4_2GB_v1" + sshKeys = ["${opc_compute_ssh_key.test_key.name}"] + attributes = "{\"foo\": \"bar\"}" + storage = { + index = 1 + volume = "${opc_compute_storage_volume.test_volume.name}" + } +} + +resource "opc_compute_storage_volume" "test_volume" { + size = "3g" + description = "My volume" + name = "test_volume_b" + tags = ["foo", "bar", "baz"] +} + +resource "opc_compute_ssh_key" "%s" { + name = "test-key" + key = "%s" + enabled = true +} +`, instanceName, keyName, updatedKey) diff --git a/builtin/providers/oracleopc/resource_ip_association.go 
b/builtin/providers/oracleopc/resource_ip_association.go new file mode 100644 index 000000000..84df10ba8 --- /dev/null +++ b/builtin/providers/oracleopc/resource_ip_association.go @@ -0,0 +1,103 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceIPAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceIPAssociationCreate, + Read: resourceIPAssociationRead, + Delete: resourceIPAssociationDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "vcable": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "parentpool": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceIPAssociationCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + vcable, parentpool := getIPAssociationResourceData(d) + + log.Printf("[DEBUG] Creating ip association between vcable %s and parent pool %s", + vcable, parentpool) + + client := meta.(*OPCClient).IPAssociations() + info, err := client.CreateIPAssociation(vcable, parentpool) + if err != nil { + return fmt.Errorf("Error creating ip association between vcable %s and parent pool %s: %s", + vcable, parentpool, err) + } + + d.SetId(info.Name) + updateIPAssociationResourceData(d, info) + return nil +} + +func updateIPAssociationResourceData(d *schema.ResourceData, info *compute.IPAssociationInfo) { + d.Set("name", info.Name) + d.Set("parentpool", info.ParentPool) + d.Set("vcable", info.VCable) +} + +func resourceIPAssociationRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).IPAssociations() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of ip association %s", name) + result, err := client.GetIPAssociation(name) + if err != nil { + // IP Association does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading ip association %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of ip association %s: %#v", name, result) + updateIPAssociationResourceData(d, result) + return nil +} + +func getIPAssociationResourceData(d *schema.ResourceData) (string, string) { + return d.Get("vcable").(string), d.Get("parentpool").(string) +} + +func resourceIPAssociationDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).IPAssociations() + name := d.Get("name").(string) + + vcable, parentpool := getIPAssociationResourceData(d) + log.Printf("[DEBUG] Deleting ip association %s between vcable %s and parent pool %s", + name, vcable, parentpool) + + if err := client.DeleteIPAssociation(name); err != nil { + return fmt.Errorf("Error deleting ip association %s between vcable %s and parent pool %s: %s", + name, vcable, parentpool, err) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_ip_association_test.go b/builtin/providers/oracleopc/resource_ip_association_test.go new file mode 100644 index 000000000..44f48474f --- /dev/null +++ b/builtin/providers/oracleopc/resource_ip_association_test.go @@ -0,0 +1,74 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func 
TestAccOPCResourceIPAssociation_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck( + ipAssociationResourceName, + testAccCheckIPAssociationDestroyed), + Steps: []resource.TestStep{ + { + Config: testAccIPAssociationBasic, + Check: resource.ComposeTestCheckFunc( + opcResourceCheck( + ipAssociationResourceName, + testAccCheckIPAssociationExists), + ), + }, + }, + }) +} + +func testAccCheckIPAssociationExists(state *OPCResourceState) error { + associationName := getIPAssociationName(state) + + if _, err := state.IPAssociations().GetIPAssociation(associationName); err != nil { + return fmt.Errorf("Error retrieving state of ip assocation %s: %s", associationName, err) + } + + return nil +} + +func getIPAssociationName(rs *OPCResourceState) string { + return rs.Attributes["name"] +} + +func testAccCheckIPAssociationDestroyed(state *OPCResourceState) error { + associationName := getAssociationName(state) + if info, err := state.IPAssociations().GetIPAssociation(associationName); err == nil { + return fmt.Errorf("IP association %s still exists: %#v", associationName, info) + } + + return nil +} + +const ipAssociationName = "test_ip_association" + +var ipAssociationResourceName = fmt.Sprintf("opc_compute_ip_association.%s", ipAssociationName) + +var testAccIPAssociationBasic = fmt.Sprintf(` +resource "opc_compute_ip_reservation" "reservation1" { + parentpool = "/oracle/public/ippool" + permanent = true +} + +resource "opc_compute_ip_association" "%s" { + vcable = "${opc_compute_instance.test-instance1.vcable}" + parentpool = "ipreservation:${opc_compute_ip_reservation.reservation1.name}" +} + +resource "opc_compute_instance" "test-instance1" { + name = "test" + label = "test" + shape = "oc3" + imageList = "/oracle/public/oel_6.4_2GB_v1" +} +`, ipAssociationName) diff --git a/builtin/providers/oracleopc/resource_ip_reservation.go b/builtin/providers/oracleopc/resource_ip_reservation.go new file mode 100644 index 000000000..fa25679d2 --- /dev/null +++ b/builtin/providers/oracleopc/resource_ip_reservation.go @@ -0,0 +1,122 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceIPReservation() *schema.Resource { + return &schema.Resource{ + Create: resourceIPReservationCreate, + Read: resourceIPReservationRead, + Delete: resourceIPReservationDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "permanent": &schema.Schema{ + Type: schema.TypeBool, + Required: true, + ForceNew: true, + }, + + "parentpool": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ip": &schema.Schema{ + Type: schema.TypeString, + Optional: false, + Computed: true, + }, + }, + } +} + +func resourceIPReservationCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + parentpool, permanent, tags := getIPReservationResourceData(d) + + log.Printf("[DEBUG] Creating ip reservation from parentpool %s with tags=%s", + parentpool, tags) + + client := meta.(*OPCClient).IPReservations() + info, err := client.CreateIPReservation(parentpool, permanent, tags) + if err != nil { + return 
fmt.Errorf("Error creating ip reservation from parentpool %s with tags=%s: %s", + parentpool, tags, err) + } + + d.SetId(info.Name) + updateIPReservationResourceData(d, info) + return nil +} + +func updateIPReservationResourceData(d *schema.ResourceData, info *compute.IPReservationInfo) { + d.Set("name", info.Name) + d.Set("parentpool", info.ParentPool) + d.Set("permanent", info.Permanent) + d.Set("tags", info.Tags) + d.Set("ip", info.IP) +} + +func resourceIPReservationRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).IPReservations() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of ip reservation %s", name) + result, err := client.GetIPReservation(name) + if err != nil { + // IP Reservation does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading ip reservation %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of ip reservation %s: %#v", name, result) + updateIPReservationResourceData(d, result) + return nil +} + +func getIPReservationResourceData(d *schema.ResourceData) (string, bool, []string) { + tagdata := d.Get("tags").([]interface{}) + tags := make([]string, len(tagdata)) + for i, tag := range tagdata { + tags[i] = tag.(string) + } + return d.Get("parentpool").(string), + d.Get("permanent").(bool), + tags +} + +func resourceIPReservationDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).IPReservations() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting ip reservation %s", name) + + if err := client.DeleteIPReservation(name); err != nil { + return fmt.Errorf("Error deleting ip reservation %s", name) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_security_application.go b/builtin/providers/oracleopc/resource_security_application.go new file mode 100644 index 000000000..b7205754c --- /dev/null +++ b/builtin/providers/oracleopc/resource_security_application.go @@ -0,0 +1,124 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceSecurityApplication() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityApplicationCreate, + Read: resourceSecurityApplicationRead, + Delete: resourceSecurityApplicationDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "dport": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "icmptype": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "icmpcode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceSecurityApplicationCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + name, protocol, dport, icmptype, icmpcode, description := getSecurityApplicationResourceData(d) + + log.Printf("[DEBUG] Creating security application %s", name) + + client := meta.(*OPCClient).SecurityApplications() + info, err := 
client.CreateSecurityApplication(name, protocol, dport, icmptype, icmpcode, description) + if err != nil { + return fmt.Errorf("Error creating security application %s: %s", name, err) + } + + d.SetId(info.Name) + updateSecurityApplicationResourceData(d, info) + return nil +} + +func updateSecurityApplicationResourceData(d *schema.ResourceData, info *compute.SecurityApplicationInfo) { + d.Set("name", info.Name) + d.Set("protocol", info.Protocol) + d.Set("dport", info.DPort) + d.Set("icmptype", info.ICMPType) + d.Set("icmpcode", info.ICMPCode) + d.Set("description", info.Description) +} + +func resourceSecurityApplicationRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityApplications() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of security application %s", name) + result, err := client.GetSecurityApplication(name) + if err != nil { + // Security Application does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading security application %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of security application %s: %#v", name, result) + updateSecurityApplicationResourceData(d, result) + return nil +} + +func getSecurityApplicationResourceData(d *schema.ResourceData) (string, string, string, string, string, string) { + return d.Get("name").(string), + d.Get("protocol").(string), + d.Get("dport").(string), + d.Get("icmptype").(string), + d.Get("icmpcode").(string), + d.Get("description").(string) +} + +func resourceSecurityApplicationDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityApplications() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting security application %s", name) + + if err := client.DeleteSecurityApplication(name); err != nil { + return fmt.Errorf("Error deleting security application %s: %s", name, err) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_security_association.go b/builtin/providers/oracleopc/resource_security_association.go new file mode 100644 index 000000000..15a912657 --- /dev/null +++ b/builtin/providers/oracleopc/resource_security_association.go @@ -0,0 +1,103 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceSecurityAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityAssociationCreate, + Read: resourceSecurityAssociationRead, + Delete: resourceSecurityAssociationDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "vcable": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "seclist": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceSecurityAssociationCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + vcable, seclist := getSecurityAssociationResourceData(d) + + log.Printf("[DEBUG] Creating security association between vcable %s and security list %s", + vcable, seclist) + + client := meta.(*OPCClient).SecurityAssociations() + info, err := client.CreateSecurityAssociation(vcable, seclist) + if err != nil { + return fmt.Errorf("Error 
creating security association between vcable %s and security list %s: %s", + vcable, seclist, err) + } + + d.SetId(info.Name) + updateSecurityAssociationResourceData(d, info) + return nil +} + +func updateSecurityAssociationResourceData(d *schema.ResourceData, info *compute.SecurityAssociationInfo) { + d.Set("name", info.Name) + d.Set("seclist", info.SecList) + d.Set("vcable", info.VCable) +} + +func resourceSecurityAssociationRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityAssociations() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of security association %s", name) + result, err := client.GetSecurityAssociation(name) + if err != nil { + // Security Association does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading security association %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of security association %s: %#v", name, result) + updateSecurityAssociationResourceData(d, result) + return nil +} + +func getSecurityAssociationResourceData(d *schema.ResourceData) (string, string) { + return d.Get("vcable").(string), d.Get("seclist").(string) +} + +func resourceSecurityAssociationDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityAssociations() + name := d.Get("name").(string) + + vcable, seclist := getSecurityAssociationResourceData(d) + log.Printf("[DEBUG] Deleting security association %s between vcable %s and security list %s", + name, vcable, seclist) + + if err := client.DeleteSecurityAssociation(name); err != nil { + return fmt.Errorf("Error deleting security association %s between vcable %s and security list %s: %s", + name, vcable, seclist, err) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_security_association_test.go b/builtin/providers/oracleopc/resource_security_association_test.go new file mode 100644 index 000000000..604ef64cb --- /dev/null +++ b/builtin/providers/oracleopc/resource_security_association_test.go @@ -0,0 +1,75 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccOPCResourceSecurityAssociation_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck( + associationResourceName, + testAccCheckAssociationDestroyed), + Steps: []resource.TestStep{ + { + Config: testAccSecurityAssociationBasic, + Check: resource.ComposeTestCheckFunc( + opcResourceCheck( + associationResourceName, + testAccCheckAssociationExists), + ), + }, + }, + }) +} + +func testAccCheckAssociationExists(state *OPCResourceState) error { + associationName := getAssociationName(state) + + if _, err := state.SecurityAssociations().GetSecurityAssociation(associationName); err != nil { + return fmt.Errorf("Error retrieving state of security assocation %s: %s", associationName, err) + } + + return nil +} + +func getAssociationName(rs *OPCResourceState) string { + return rs.Attributes["name"] +} + +func testAccCheckAssociationDestroyed(state *OPCResourceState) error { + associationName := getAssociationName(state) + if info, err := state.SecurityAssociations().GetSecurityAssociation(associationName); err == nil { + return fmt.Errorf("Association %s still exists: %#v", associationName, info) + } + + return nil +} + 
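+
+// The fixture below stands up a security list and a test instance, then binds
+// the instance's vcable to that list via opc_compute_security_association;
+// the checks above look the resulting association up by its computed name.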
+const associationName = "test_rule"
+
+var associationResourceName = fmt.Sprintf("opc_compute_security_association.%s", associationName)
+
+var testAccSecurityAssociationBasic = fmt.Sprintf(`
+resource "opc_compute_security_list" "sec-list1" {
+	name = "sec-list-1"
+	policy = "PERMIT"
+	outbound_cidr_policy = "DENY"
+}
+
+resource "opc_compute_security_association" "%s" {
+	vcable = "${opc_compute_instance.test-instance1.vcable}"
+	seclist = "${opc_compute_security_list.sec-list1.name}"
+}
+
+resource "opc_compute_instance" "test-instance1" {
+	name = "test"
+	label = "test"
+	shape = "oc3"
+	imageList = "/oracle/public/oel_6.4_2GB_v1"
+}
+`, associationName)
diff --git a/builtin/providers/oracleopc/resource_security_ip_list.go b/builtin/providers/oracleopc/resource_security_ip_list.go
new file mode 100644
index 000000000..6a3e66b28
--- /dev/null
+++ b/builtin/providers/oracleopc/resource_security_ip_list.go
@@ -0,0 +1,117 @@
+package opc
+
+import (
+	"fmt"
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/oracle/terraform-provider-compute/sdk/compute"
+	"log"
+)
+
+func resourceSecurityIPList() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceSecurityIPListCreate,
+		Read:   resourceSecurityIPListRead,
+		Update: resourceSecurityIPListUpdate,
+		Delete: resourceSecurityIPListDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"ip_entries": &schema.Schema{
+				Type:     schema.TypeList,
+				Required: true,
+				ForceNew: false,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+		},
+	}
+}
+
+func resourceSecurityIPListCreate(d *schema.ResourceData, meta interface{}) error {
+	log.Printf("[DEBUG] Resource state: %#v", d.State())
+
+	name, ipEntries := getSecurityIPListResourceData(d)
+
+	log.Printf("[DEBUG] Creating security IP list with name %s, entries %s",
+		name, ipEntries)
+
+	client := meta.(*OPCClient).SecurityIPLists()
+	info, err := client.CreateSecurityIPList(name, ipEntries)
+	if err != nil {
+		return fmt.Errorf("Error creating security IP list %s: %s", name, err)
+	}
+
+	d.SetId(info.Name)
+	updateSecurityIPListResourceData(d, info)
+	return nil
+}
+
+func updateSecurityIPListResourceData(d *schema.ResourceData, info *compute.SecurityIPListInfo) {
+	d.Set("name", info.Name)
+	// Use the schema key "ip_entries" (not "entries") so that Read
+	// actually refreshes the list contents in state.
+	d.Set("ip_entries", info.SecIPEntries)
+}
+
+func resourceSecurityIPListRead(d *schema.ResourceData, meta interface{}) error {
+	log.Printf("[DEBUG] Resource state: %#v", d.State())
+	client := meta.(*OPCClient).SecurityIPLists()
+	name := d.Get("name").(string)
+
+	log.Printf("[DEBUG] Reading state of security IP list %s", name)
+	result, err := client.GetSecurityIPList(name)
+	if err != nil {
+		// Security IP List does not exist
+		if compute.WasNotFoundError(err) {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error reading security IP list %s: %s", name, err)
+	}
+
+	log.Printf("[DEBUG] Read state of security IP list %s: %#v", name, result)
+	updateSecurityIPListResourceData(d, result)
+	return nil
+}
+
+func getSecurityIPListResourceData(d *schema.ResourceData) (string, []string) {
+	name := d.Get("name").(string)
+	ipEntries := d.Get("ip_entries").([]interface{})
+	ipEntryStrings := []string{}
+	for _, entry := range ipEntries {
+		ipEntryStrings = append(ipEntryStrings, entry.(string))
+	}
+	return name, ipEntryStrings
+}
+
+func resourceSecurityIPListUpdate(d *schema.ResourceData, meta interface{}) error {
+	log.Printf("[DEBUG] Resource state: %#v", d.State())
+
+	client := 
meta.(*OPCClient).SecurityIPLists() + name, entries := getSecurityIPListResourceData(d) + + log.Printf("[DEBUG] Updating security IP list %s with ip entries %s", + name, entries) + + info, err := client.UpdateSecurityIPList(name, entries) + if err != nil { + return fmt.Errorf("Error updating security IP list %s: %s", name, err) + } + + updateSecurityIPListResourceData(d, info) + return nil +} + +func resourceSecurityIPListDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityIPLists() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting security IP list %s", name) + if err := client.DeleteSecurityIPList(name); err != nil { + return fmt.Errorf("Error deleting security IP list %s: %s", name, err) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_security_list.go b/builtin/providers/oracleopc/resource_security_list.go new file mode 100644 index 000000000..eea11bbb1 --- /dev/null +++ b/builtin/providers/oracleopc/resource_security_list.go @@ -0,0 +1,119 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceSecurityList() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityListCreate, + Read: resourceSecurityListRead, + Update: resourceSecurityListUpdate, + Delete: resourceSecurityListDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + + "outbound_cidr_policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + }, + } +} + +func resourceSecurityListCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + name, policy, outboundCIDRPolicy := getSecurityListResourceData(d) + + log.Printf("[DEBUG] Creating security list with name %s, policy %s, outbound CIDR policy %s", + name, policy, outboundCIDRPolicy) + + client := meta.(*OPCClient).SecurityLists() + info, err := client.CreateSecurityList(name, policy, outboundCIDRPolicy) + if err != nil { + return fmt.Errorf("Error creating security list %s: %s", name, err) + } + + d.SetId(info.Name) + updateSecurityListResourceData(d, info) + return nil +} + +func updateSecurityListResourceData(d *schema.ResourceData, info *compute.SecurityListInfo) { + d.Set("name", info.Name) + d.Set("policy", info.Policy) + d.Set("outbound_cidr_policy", info.OutboundCIDRPolicy) +} + +func resourceSecurityListRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityLists() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of security list %s", name) + result, err := client.GetSecurityList(name) + if err != nil { + // Security List does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading security list %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of ssh key %s: %#v", name, result) + updateSecurityListResourceData(d, result) + return nil +} + +func getSecurityListResourceData(d *schema.ResourceData) (string, string, string) { + return d.Get("name").(string), + d.Get("policy").(string), + d.Get("outbound_cidr_policy").(string) +} + +func 
resourceSecurityListUpdate(d *schema.ResourceData, meta interface{}) error {
+	log.Printf("[DEBUG] Resource state: %#v", d.State())
+
+	client := meta.(*OPCClient).SecurityLists()
+	name, policy, outboundCIDRPolicy := getSecurityListResourceData(d)
+
+	log.Printf("[DEBUG] Updating security list %s with policy %s, outbound_cidr_policy %s",
+		name, policy, outboundCIDRPolicy)
+
+	info, err := client.UpdateSecurityList(name, policy, outboundCIDRPolicy)
+	if err != nil {
+		return fmt.Errorf("Error updating security list %s: %s", name, err)
+	}
+
+	updateSecurityListResourceData(d, info)
+	return nil
+}
+
+func resourceSecurityListDelete(d *schema.ResourceData, meta interface{}) error {
+	log.Printf("[DEBUG] Resource state: %#v", d.State())
+	client := meta.(*OPCClient).SecurityLists()
+	name := d.Get("name").(string)
+
+	log.Printf("[DEBUG] Deleting security list %s", name)
+	if err := client.DeleteSecurityList(name); err != nil {
+		return fmt.Errorf("Error deleting security list %s: %s", name, err)
+	}
+	return nil
+}
diff --git a/builtin/providers/oracleopc/resource_security_rule.go b/builtin/providers/oracleopc/resource_security_rule.go
new file mode 100644
index 000000000..0d9eb562c
--- /dev/null
+++ b/builtin/providers/oracleopc/resource_security_rule.go
@@ -0,0 +1,143 @@
+package opc
+
+import (
+	"fmt"
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/oracle/terraform-provider-compute/sdk/compute"
+	"log"
+)
+
+func resourceSecurityRule() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceSecurityRuleCreate,
+		Read:   resourceSecurityRuleRead,
+		Update: resourceSecurityRuleUpdate,
+		Delete: resourceSecurityRuleDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"source_list": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"destination_list": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"application": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"action": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: false,
+			},
+
+			"disabled": &schema.Schema{
+				Type:     schema.TypeBool,
+				Required: true,
+				ForceNew: false,
+			},
+		},
+	}
+}
+
+func resourceSecurityRuleCreate(d *schema.ResourceData, meta interface{}) error {
+	log.Printf("[DEBUG] Resource state: %#v", d.State())
+
+	name, sourceList, destinationList, application, action, disabled := getSecurityRuleResourceData(d)
+
+	log.Printf("[DEBUG] Creating security rule with name %s, sourceList %s, destinationList %s, application %s, action %s, disabled %t",
+		name, sourceList, destinationList, application, action, disabled)
+
+	client := meta.(*OPCClient).SecurityRules()
+	info, err := client.CreateSecurityRule(name, sourceList, destinationList, application, action, disabled)
+	if err != nil {
+		return fmt.Errorf("Error creating security rule %s: %s", name, err)
+	}
+
+	d.SetId(info.Name)
+	updateSecurityRuleResourceData(d, info)
+	return nil
+}
+
+func updateSecurityRuleResourceData(d *schema.ResourceData, info *compute.SecurityRuleInfo) {
+	d.Set("name", info.Name)
+	d.Set("source_list", info.SourceList)
+	d.Set("destination_list", info.DestinationList)
+	d.Set("application", info.Application)
+	d.Set("action", info.Action)
+	d.Set("disabled", info.Disabled)
+}
+
+func resourceSecurityRuleRead(d *schema.ResourceData, meta interface{}) error {
+	log.Printf("[DEBUG] Resource state: 
%#v", d.State()) + client := meta.(*OPCClient).SecurityRules() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of security rule %s", name) + result, err := client.GetSecurityRule(name) + if err != nil { + // Security Rule does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading security list %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of ssh key %s: %#v", name, result) + updateSecurityRuleResourceData(d, result) + return nil +} + +func getSecurityRuleResourceData(d *schema.ResourceData) (string, string, string, string, string, bool) { + return d.Get("name").(string), + d.Get("source_list").(string), + d.Get("destination_list").(string), + d.Get("application").(string), + d.Get("action").(string), + d.Get("disabled").(bool) +} + +func resourceSecurityRuleUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + client := meta.(*OPCClient).SecurityRules() + name, sourceList, destinationList, application, action, disabled := getSecurityRuleResourceData(d) + + log.Printf("[DEBUG] Updating security list %s with sourceList %s, destinationList %s, application %s, action %s, disabled %s", + name, sourceList, destinationList, application, action, disabled) + + info, err := client.UpdateSecurityRule(name, sourceList, destinationList, application, action, disabled) + if err != nil { + return fmt.Errorf("Error updating security rule %s: %s", name, err) + } + + updateSecurityRuleResourceData(d, info) + return nil +} + +func resourceSecurityRuleDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*OPCClient).SecurityRules() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting ssh key volume %s", name) + if err := client.DeleteSecurityRule(name); err != nil { + return fmt.Errorf("Error deleting security rule %s: %s", name, err) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_security_rule_test.go b/builtin/providers/oracleopc/resource_security_rule_test.go new file mode 100644 index 000000000..f09c2b879 --- /dev/null +++ b/builtin/providers/oracleopc/resource_security_rule_test.go @@ -0,0 +1,85 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccOPCResourceSecurityRule_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck( + ruleResourceName, + testAccCheckRuleDestroyed), + Steps: []resource.TestStep{ + { + Config: testAccSecurityRuleBasic, + Check: resource.ComposeTestCheckFunc( + opcResourceCheck( + ruleResourceName, + testAccCheckRuleExists), + ), + }, + }, + }) +} + +func testAccCheckRuleExists(state *OPCResourceState) error { + ruleName := getRuleName(state) + + if _, err := state.SecurityRules().GetSecurityRule(ruleName); err != nil { + return fmt.Errorf("Error retrieving state of security rule %s: %s", ruleName, err) + } + + return nil +} + +func getRuleName(rs *OPCResourceState) string { + return rs.Attributes["name"] +} + +func testAccCheckRuleDestroyed(state *OPCResourceState) error { + ruleName := getRuleName(state) + if info, err := state.SecurityRules().GetSecurityRule(ruleName); err == nil { + return fmt.Errorf("Rule %s still exists: %#v", ruleName, info) + } + + return nil +} + +const ruleName = "test_rule" +const secListName = "sec-list1" +const 
secIpListName = "sec-ip-list1" + +var ruleResourceName = fmt.Sprintf("opc_compute_security_rule.%s", ruleName) + +var testAccSecurityRuleBasic = fmt.Sprintf(` +resource "opc_compute_security_rule" "%s" { + name = "test" + source_list = "seclist:${opc_compute_security_list.sec-list1.name}" + destination_list = "seciplist:${opc_compute_security_ip_list.sec-ip-list1.name}" + action = "PERMIT" + application = "${opc_compute_security_application.spring-boot.name}" + disabled = false +} + +resource "opc_compute_security_list" "%s" { + name = "sec-list-1" + policy = "PERMIT" + outbound_cidr_policy = "DENY" +} + +resource "opc_compute_security_application" "spring-boot" { + name = "spring-boot" + protocol = "tcp" + dport = "8080" +} + +resource "opc_compute_security_ip_list" "%s" { + name = "sec-ip-list1" + ip_entries = ["217.138.34.4"] +} +`, ruleName, secListName, secIpListName) diff --git a/builtin/providers/oracleopc/resource_ssh_key.go b/builtin/providers/oracleopc/resource_ssh_key.go new file mode 100644 index 000000000..29f68b4aa --- /dev/null +++ b/builtin/providers/oracleopc/resource_ssh_key.go @@ -0,0 +1,117 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceSSHKey() *schema.Resource { + return &schema.Resource{ + Create: resourceSSHKeyCreate, + Read: resourceSSHKeyRead, + Update: resourceSSHKeyUpdate, + Delete: resourceSSHKeyDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Required: true, + ForceNew: false, + }, + }, + } +} + +func resourceSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d) + + client := meta.(*OPCClient).SSHKeys() + name := d.Get("name").(string) + key := d.Get("key").(string) + enabled := d.Get("enabled").(bool) + + log.Printf("[DEBUG] Creating ssh key with name %s, key %s, enabled %s", + name, key, enabled) + + info, err := client.CreateSSHKey(name, key, enabled) + if err != nil { + return fmt.Errorf("Error creating ssh key %s: %s", name, err) + } + + d.SetId(info.Name) + updateSSHKeyResourceData(d, info) + return nil +} + +func updateSSHKeyResourceData(d *schema.ResourceData, info *compute.SSHKeyInfo) { + d.Set("name", info.Name) + d.Set("key", info.Key) + d.Set("enabled", info.Enabled) +} + +func resourceSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d) + client := meta.(*OPCClient).SSHKeys() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of ssh key %s", name) + result, err := client.GetSSHKey(name) + if err != nil { + // SSH Key does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading ssh key %s: %s", name, err) + } + + log.Printf("[DEBUG] Read state of ssh key %s: %#v", name, result) + updateSSHKeyResourceData(d, result) + return nil +} + +func resourceSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d) + + client := meta.(*OPCClient).SSHKeys() + name := d.Get("name").(string) + key := d.Get("key").(string) + enabled := d.Get("enabled").(bool) + + log.Printf("[DEBUG] Updating ssh key with name %s, key %s, enabled %s", + name, key, enabled) + + info, 
err := client.UpdateSSHKey(name, key, enabled) + if err != nil { + return fmt.Errorf("Error updating ssh key %s: %s", name, err) + } + + updateSSHKeyResourceData(d, info) + return nil +} + +func resourceSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d) + client := meta.(*OPCClient).SSHKeys() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting ssh key volume %s", name) + if err := client.DeleteSSHKey(name); err != nil { + return fmt.Errorf("Error deleting ssh key %s: %s", name, err) + } + return nil +} diff --git a/builtin/providers/oracleopc/resource_storage_volume.go b/builtin/providers/oracleopc/resource_storage_volume.go new file mode 100644 index 000000000..2d80d09f2 --- /dev/null +++ b/builtin/providers/oracleopc/resource_storage_volume.go @@ -0,0 +1,301 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/oracle/terraform-provider-compute/sdk/compute" + "log" +) + +func resourceStorageVolume() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageVolumeCreate, + Read: resourceStorageVolumeRead, + Update: resourceStorageVolumeUpdate, + Delete: resourceStorageVolumeDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "sizeInBytes": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "storage": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "/oracle/public/storage/default", + }, + + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "bootableImage": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "bootableImageVersion": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: -1, + }, + + "snapshot": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "account": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "snapshotId": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceStorageVolumeCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource data: %#v", d) + + sv := meta.(*OPCClient).StorageVolumes() + name := d.Get("name").(string) + properties := []string{d.Get("storage").(string)} + + spec := sv.NewStorageVolumeSpec( + d.Get("size").(string), + properties, + name) + + if d.Get("description").(string) != "" { + spec.SetDescription(d.Get("description").(string)) + } + + spec.SetTags(getTags(d)) + + if d.Get("bootableImage") != "" { + spec.SetBootableImage(d.Get("bootableImage").(string), d.Get("bootableImageVersion").(int)) + } + + if len(d.Get("snapshot").(*schema.Set).List()) > 0 { + snapshotDetails := d.Get("snapshot").(*schema.Set).List()[0].(map[string]interface{}) + spec.SetSnapshot( + snapshotDetails["name"].(string), + snapshotDetails["account"].(string), + ) + } + + if d.Get("snapshotId") != "" { 
+ spec.SetSnapshotID(d.Get("snapshotId").(string)) + } + + log.Printf("[DEBUG] Creating storage volume %s with spec %#v", name, spec) + err := sv.CreateStorageVolume(spec) + if err != nil { + return fmt.Errorf("Error creating storage volume %s: %s", name, err) + } + + log.Printf("[DEBUG] Waiting for storage volume %s to come online", name) + info, err := sv.WaitForStorageVolumeOnline(name, meta.(*OPCClient).MaxRetryTimeout) + if err != nil { + return fmt.Errorf("Error waiting for storage volume %s to come online: %s", name, err) + } + + log.Printf("[DEBUG] Created storage volume %s: %#v", name, info) + + cachedAttachments, attachmentsFound := meta.(*OPCClient).storageAttachmentsByVolumeCache[name] + if attachmentsFound { + log.Printf("[DEBUG] Rebuilding storage attachments for volume %s", name) + for _, cachedAttachment := range cachedAttachments { + log.Printf("[DEBUG] Rebuilding storage attachments between volume %s and instance %s", + name, + cachedAttachment.instanceName) + + attachmentInfo, err := meta.(*OPCClient).StorageAttachments().CreateStorageAttachment( + cachedAttachment.index, + cachedAttachment.instanceName, + name, + ) + + if err != nil { + return fmt.Errorf( + "Error recreating storage attachment between volume %s and instance %s: %s", + name, + *cachedAttachment.instanceName, + err) + } + err = meta.(*OPCClient).StorageAttachments().WaitForStorageAttachmentCreated( + attachmentInfo.Name, + meta.(*OPCClient).MaxRetryTimeout) + if err != nil { + return fmt.Errorf( + "Error recreating storage attachment between volume %s and instance %s: %s", + name, + *cachedAttachment.instanceName, + err) + } + } + meta.(*OPCClient).storageAttachmentsByVolumeCache[name] = nil + } + + d.SetId(name) + updateResourceData(d, info) + return nil +} + +func getTags(d *schema.ResourceData) []string { + tags := []string{} + for _, i := range d.Get("tags").([]interface{}) { + tags = append(tags, i.(string)) + } + return tags +} + +func updateResourceData(d *schema.ResourceData, info *compute.StorageVolumeInfo) error { + d.Set("name", info.Name) + d.Set("description", info.Description) + d.Set("storage", info.Properties[0]) + d.Set("sizeInBytes", info.Size) + d.Set("tags", info.Tags) + d.Set("bootableImage", info.ImageList) + d.Set("bootableImageVersion", info.ImageListEntry) + if info.Snapshot != "" { + d.Set("snapshot", map[string]interface{}{ + "name": info.Snapshot, + "account": info.SnapshotAccount, + }) + } + d.Set("snapshotId", info.SnapshotID) + + return nil +} + +func resourceStorageVolumeRead(d *schema.ResourceData, meta interface{}) error { + sv := meta.(*OPCClient).StorageVolumes() + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading state of storage volume %s", name) + result, err := sv.GetStorageVolume(name) + if err != nil { + // Volume doesn't exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading storage volume %s: %s", name, err) + } + + if len(result.Result) == 0 { + // Volume doesn't exist + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Read state of storage volume %s: %#v", name, &result.Result[0]) + updateResourceData(d, &result.Result[0]) + + return nil +} + +func resourceStorageVolumeUpdate(d *schema.ResourceData, meta interface{}) error { + sv := meta.(*OPCClient).StorageVolumes() + name := d.Get("name").(string) + description := d.Get("description").(string) + size := d.Get("size").(string) + tags := getTags(d) + + log.Printf("[DEBUG] Updating storage volume %s with size %s, description %s, tags %#v", 
name, size, description, tags) + err := sv.UpdateStorageVolume(name, size, description, tags) + + if err != nil { + return fmt.Errorf("Error updating storage volume %s: %s", name, err) + } + + log.Printf("[DEBUG] Waiting for updated storage volume %s to come online", name) + info, err := sv.WaitForStorageVolumeOnline(name, meta.(*OPCClient).MaxRetryTimeout) + if err != nil { + return fmt.Errorf("Error waiting for updated storage volume %s to come online: %s", name, err) + } + + log.Printf("[DEBUG] Updated storage volume %s: %#v", name, info) + updateResourceData(d, info) + return nil +} + +func resourceStorageVolumeDelete(d *schema.ResourceData, meta interface{}) error { + sv := meta.(*OPCClient).StorageVolumes() + name := d.Get("name").(string) + + sva := meta.(*OPCClient).StorageAttachments() + attachments, err := sva.GetStorageAttachmentsForVolume(name) + if err != nil { + return fmt.Errorf("Error retrieving storage attachments for volume %s: %s", name, err) + } + + attachmentsToCache := make([]storageAttachment, len(*attachments)) + for index, attachment := range *attachments { + log.Printf("[DEBUG] Deleting storage attachment %s for volume %s", attachment.Name, name) + sva.DeleteStorageAttachment(attachment.Name) + sva.WaitForStorageAttachmentDeleted(attachment.Name, meta.(*OPCClient).MaxRetryTimeout) + attachmentsToCache[index] = storageAttachment{ + index: attachment.Index, + instanceName: compute.InstanceNameFromString(attachment.InstanceName), + } + } + meta.(*OPCClient).storageAttachmentsByVolumeCache[name] = attachmentsToCache + + log.Printf("[DEBUG] Deleting storage volume %s", name) + err = sv.DeleteStorageVolume(name) + if err != nil { + return fmt.Errorf("Error deleting storage volume %s: %s", name, err) + } + + log.Printf("[DEBUG] Waiting for storage volume %s to finish deleting", name) + err = sv.WaitForStorageVolumeDeleted(name, meta.(*OPCClient).MaxRetryTimeout) + if err != nil { + return fmt.Errorf("Error waiting for storage volume %s to finish deleting: %s", name, err) + } + + log.Printf("[DEBUG] Deleted storage volume %s", name) + return nil +} diff --git a/builtin/providers/oracleopc/resource_storage_volume_test.go b/builtin/providers/oracleopc/resource_storage_volume_test.go new file mode 100644 index 000000000..d168b5309 --- /dev/null +++ b/builtin/providers/oracleopc/resource_storage_volume_test.go @@ -0,0 +1,70 @@ +package opc + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccOPCStorageVolume_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck( + "opc_compute_storage_volume.test_volume", + testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccStorageVolumeBasic, + Check: resource.ComposeTestCheckFunc( + opcResourceCheck( + "opc_compute_storage_volume.test_volume", + testAccCheckStorageVolumeExists), + ), + }, + }, + }) +} + +func testAccCheckStorageVolumeExists(state *OPCResourceState) error { + sv := state.StorageVolumes() + volumeName := state.Attributes["name"] + + info, err := sv.GetStorageVolume(volumeName) + if err != nil { + return fmt.Errorf("Error retrieving state of volume %s: %s", volumeName, err) + } + + if len(info.Result) == 0 { + return fmt.Errorf("No info found for volume %s", volumeName) + } + + return nil +} + +func testAccCheckStorageVolumeDestroyed(state *OPCResourceState) error { + sv := state.StorageVolumes() + + volumeName := 
state.Attributes["name"] + + info, err := sv.GetStorageVolume(volumeName) + if err != nil { + return fmt.Errorf("Error retrieving state of volume %s: %s", volumeName, err) + } + + if len(info.Result) != 0 { + return fmt.Errorf("Volume %s still exists", volumeName) + } + + return nil +} + +const testAccStorageVolumeBasic = ` +resource "opc_compute_storage_volume" "test_volume" { + size = "3g" + description = "My volume" + name = "test_volume_b" + tags = ["foo", "bar", "baz"] +} +` From c5fa0404d69df9bd117b7b42fe8fbe9107d5bf3c Mon Sep 17 00:00:00 2001 From: Stephen Cross Date: Wed, 29 Mar 2017 16:37:14 +0000 Subject: [PATCH 015/342] Initial docs for Oracle Compute Cloud provider --- .../r/opc_compute_instance.html.markdown | 68 +++++++++++++++++++ .../opc_compute_ip_association.html.markdown | 31 +++++++++ .../opc_compute_ip_reservation.html.markdown | 33 +++++++++ ...compute_security_application.html.markdown | 39 +++++++++++ ...compute_security_association.html.markdown | 29 ++++++++ ...opc_compute_security_ip_list.html.markdown | 28 ++++++++ .../r/opc_compute_security_list.html.markdown | 33 +++++++++ .../r/opc_compute_security_rule.html.markdown | 46 +++++++++++++ .../r/opc_compute_ssh_key.html.markdown | 32 +++++++++ .../opc_compute_storage_volume.html.markdown | 49 +++++++++++++ 10 files changed, 388 insertions(+) create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown create mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown new file mode 100644 index 000000000..04762b5c3 --- /dev/null +++ b/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown @@ -0,0 +1,68 @@ +--- +layout: "oracle" +page_title: "Oracle: opc_compute_instance" +sidebar_current: "docs-opc-resource-instance" +description: |- + Creates and manages an instance in an OPC identity domain. +--- + +# opc\_compute\_instance + +The ``opc_compute_instance`` resource creates and manages an instance in an OPC identity domain. + +~> **Caution:** The ``opc_compute_instance`` resource can completely delete your +instance just as easily as it can create it. To avoid costly accidents, +consider setting +[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy) +on your instance resources as an extra safety measure. 
+
+## Example Usage
+
+```
+resource "opc_compute_instance" "test_instance" {
+    name = "test"
+    label = "test"
+    shape = "oc3"
+    imageList = "/oracle/public/oel_6.4_2GB_v1"
+    sshKeys = ["${opc_compute_ssh_key.key1.name}"]
+    attributes = "{\"foo\":\"bar\"}"
+    storage = [{
+        index = 1
+        volume = "${opc_compute_storage_volume.test_volume.name}"
+    },
+    {
+        index = 2
+        volume = "${opc_compute_storage_volume.test_volume2.name}"
+    }]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the instance. This need not be unique, as each instance is assigned a separate
+computed `opcId`.
+
+* `shape` - (Required) The shape of the instance, e.g. `oc4`.
+
+* `imageList` - (Optional) The imageList to boot the instance from, e.g. `/oracle/public/oel_6.4_2GB_v1`
+
+* `label` - (Optional) The label to apply to the instance.
+
+* `ip` - (Computed) The internal IP address assigned to the instance.
+
+* `opcId` - (Computed) The internal ID assigned to the instance.
+
+* `sshKeys` - (Optional) The names of the SSH Keys that can be used to log into the instance.
+
+* `attributes` - (Optional) An arbitrary JSON-formatted collection of attributes which is made available to the instance.
+
+* `vcable` - (Computed) The ID of the instance's VCable, which is used to associate it with reserved IP addresses and
+add it to Security Lists.
+
+* `storage` - (Optional) A set of zero or more storage volumes to attach to the instance. Each volume has two arguments:
+`index`, which is the volume's index in the instance's list of mounted volumes, and `volume`, which is the name of the
+storage volume to mount.
+
+* `bootOrder` - (Optional) The index number of the bootable storage volume that should be used to boot the instance. e.g. `[ 1 ]`. If you specify both `bootOrder` and `imageList`, the `imageList` attribute is ignored.
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
new file mode 100644
index 000000000..deeed76c4
--- /dev/null
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
@@ -0,0 +1,31 @@
+---
+layout: "oracle"
+page_title: "Oracle: opc_compute_ip_association"
+sidebar_current: "docs-opc-resource-ip-association"
+description: |-
+  Creates and manages an IP association in an OPC identity domain.
+---
+
+# opc\_compute\_ip\_association
+
+The ``opc_compute_ip_association`` resource creates and manages an association between an IP address and an instance in
+an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_ip_association" "instance1_reservation1" {
+    vcable = "${opc_compute_instance.test_instance.vcable}"
+    parentpool = "ipreservation:${opc_compute_ip_reservation.reservation1.name}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `vcable` - (Required) The vcable of the instance to associate the IP address with.
+
+* `parentpool` - (Required) The pool from which to take an IP address. To associate a specific reserved IP address, use
+the prefix `ipreservation:` followed by the name of the IP reservation. To allocate an IP address from a pool, use the
+prefix `ippool:`, e.g. `ippool:/oracle/public/ippool`. 
\ No newline at end of file diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown new file mode 100644 index 000000000..7c44c62ea --- /dev/null +++ b/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown @@ -0,0 +1,33 @@ +--- +layout: "oracle" +page_title: "Oracle: opc_compute_ip_reservation" +sidebar_current: "docs-opc-resource-ip-assocation" +description: |- + Creates and manages an IP reservation in an OPC identity domain. +--- + +# opc\_compute\_ip\_reservation + +The ``opc_compute_ip_reservation`` resource creates and manages an IP reservation in an OPC identity domain. + +## Example Usage + +``` +resource "opc_compute_ip_reservation" "reservation1" { + parentpool = "/oracle/public/ippool" + permanent = true + tags = [] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `parentpool` - (Required) The pool from which to allocate the IP address. + +* `permanent` - (Required) Whether the IP address remains reserved even when it is no longer associated with an instance +(if true), or may be returned to the pool and replaced with a different IP address when an instance is restarted, or +deleted and recreated (if false). + +* `tags` - (Optional) List of tags that may be applied to the IP reservation. \ No newline at end of file diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown new file mode 100644 index 000000000..fe8c9ba3c --- /dev/null +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown @@ -0,0 +1,39 @@ +--- +layout: "oracle" +page_title: "Oracle: opc_compute_security_application" +sidebar_current: "docs-opc-resource-security-application" +description: |- + Creates and manages a security application in an OPC identity domain. +--- + +# opc\_compute\_ip\_reservation + +The ``opc_compute_security_application`` resource creates and manages a security application in an OPC identity domain. + +## Example Usage + +``` +resource "opc_compute_security_application" "tomcat" { + name = "tomcat" + protocol = "tcp" + dport = "8080" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The unique (within the identity domain) name of the application + +* `protocol` - (Required) The protocol to enable for this application. Must be either one of +`tcp`, `udp`, `icmp`, `igmp`, `ipip`, `rdp`, `esp`, `ah`, `gre`, `icmpv6`, `ospf`, `pim`, `sctp`, `mplsip` or `all`, or +the corresponding integer in the range 0-254 from the list of [assigned protocol numbers](http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) + +* `dport` - (Required) The port, or range of ports, to enable for this application, e.g `8080`, `6000-7000`. + +* `icmptype` - (Optional) The ICMP type to enable for this application, if the `protocol` is `icmp`. Must be one of +`echo`, `reply`, `ttl`, `traceroute`, `unreachable`. + +* `icmpcode` - (Optional) The ICMP code to enable for this application, if the `protocol` is `icmp`. Must be one of +`network`, `host`, `protocol`, `port`, `df`, `admin`. 
\ No newline at end of file diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown new file mode 100644 index 000000000..170acc2ea --- /dev/null +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown @@ -0,0 +1,29 @@ +--- +layout: "oracle" +page_title: "Oracle: opc_compute_security_association" +sidebar_current: "docs-opc-resource-security-association" +description: |- + Creates and manages a security association in an OPC identity domain. +--- + +# opc\_compute\_ip\_reservation + +The ``opc_compute_security_association`` resource creates and manages an association between an instance and a security +list in an OPC identity domain. + +## Example Usage + +``` +resource "opc_compute_security_association" "test_instance_sec_list_1" { + vcable = "${opc_compute_instance.test_instance.vcable}" + seclist = "${opc_compute_security_list.sec_list1.name}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `vcable` - (Required) The `vcable` of the instance to associate to the security list. + +* `seclist` - (Required) The name of the security list to associate the instance to. \ No newline at end of file diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown new file mode 100644 index 000000000..bded4c30e --- /dev/null +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown @@ -0,0 +1,28 @@ +--- +layout: "oracle" +page_title: "Oracle: opc_compute_security_ip_list" +sidebar_current: "docs-opc-resource-security-list" +description: |- + Creates and manages a security IP list in an OPC identity domain. +--- + +# opc\_compute\_ip\_reservation + +The ``opc_compute_security_ip_list`` resource creates and manages a security IP list in an OPC identity domain. + +## Example Usage + +``` +resource "opc_compute_security_ip_list" "sec_ip_list1" { + name = "sec-ip-list1" + ip_entries = ["217.138.34.4"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The unique (within the identity domain) name of the security IP list. + +* `ip_entries` - (Required) The IP addresses to include in the list. \ No newline at end of file diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown new file mode 100644 index 000000000..7da5e5668 --- /dev/null +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown @@ -0,0 +1,33 @@ +--- +layout: "oracle" +page_title: "Oracle: opc_compute_security_list" +sidebar_current: "docs-opc-resource-security-list" +description: |- + Creates and manages a security list in an OPC identity domain. +--- + +# opc\_compute\_ip\_reservation + +The ``opc_compute_security_list`` resource creates and manages a security list in an OPC identity domain. + +## Example Usage + +``` +resource "opc_compute_security_list" "sec_list1" { + name = "sec-list-1" + policy = "permit" + outbound_cidr_policy = "deny" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The unique (within the identity domain) name of the security list. 
+
+* `policy` - (Required) The policy to apply to instances associated with this list. Must be one of `permit`,
+`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent).
+
+* `outbound_cidr_policy` - (Required) The policy for outbound traffic from the security list. Must be one of `permit`,
+`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent).
\ No newline at end of file
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown
new file mode 100644
index 000000000..02c4b7533
--- /dev/null
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown
@@ -0,0 +1,46 @@
+---
+layout: "oracle"
+page_title: "Oracle: opc_compute_security_rule"
+sidebar_current: "docs-opc-resource-security-rule"
+description: |-
+  Creates and manages a security rule in an OPC identity domain.
+---
+
+# opc\_compute\_security\_rule
+
+The ``opc_compute_security_rule`` resource creates and manages a security rule in an OPC identity domain, which joins
+together a source security list (or security IP list), a destination security list (or security IP list), and a security
+application.
+
+## Example Usage
+
+```
+resource "opc_compute_security_rule" "test_rule" {
+    name = "test"
+    source_list = "seclist:${opc_compute_security_list.sec-list1.name}"
+    destination_list = "seciplist:${opc_compute_security_ip_list.sec-ip-list1.name}"
+    action = "permit"
+    application = "${opc_compute_security_application.spring-boot.name}"
+    disabled = false
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The unique (within the identity domain) name of the security rule.
+
+* `source_list` - (Required) The source security list (prefixed with `seclist:`), or security IP list (prefixed with
+`seciplist:`).
+ 
+ * `destination_list` - (Required) The destination security list (prefixed with `seclist:`), or security IP list (prefixed with
+ `seciplist:`).
+
+* `application` - (Required) The name of the application to which the rule applies.
+
+* `action` - (Required) Whether to `permit`, `refuse` or `deny` packets to which this rule applies. This will ordinarily
+be `permit`.
+
+* `disabled` - (Required) Whether to disable this security rule. This is useful if you want to temporarily disable a rule
+without removing it outright from your Terraform resource definition.
\ No newline at end of file
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown
new file mode 100644
index 000000000..9655653a9
--- /dev/null
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown
@@ -0,0 +1,32 @@
+---
+layout: "oracle"
+page_title: "Oracle: opc_compute_ssh_key"
+sidebar_current: "docs-opc-resource-instance"
+description: |-
+  Creates and manages an SSH key in an OPC identity domain.
+---
+
+# opc\_compute\_ssh_key
+
+The ``opc_compute_ssh_key`` resource creates and manages an SSH key in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_ssh_key" "test_key" {
+    name = "test-key"
+    key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIk..."
+    enabled = true
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The unique (within this identity domain) name of the SSH key. 
+ +* `key` - (Required) The SSH key itself + +* `enabled` - (Required) Whether or not the key is enabled. This is useful if you want to temporarily disable an SSH key, +without removing it entirely from your Terraform resource definition. \ No newline at end of file diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown new file mode 100644 index 000000000..0e91a8ad7 --- /dev/null +++ b/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown @@ -0,0 +1,49 @@ +--- +layout: "oracle" +page_title: "Oracle: opc_compute_storage_volume" +sidebar_current: "docs-opc-resource-storage_volume" +description: |- + Creates and manages a storage volume in an OPC identity domain. +--- + +# opc\_compute\_storage\_volume + +The ``opc_compute_storage_volume`` resource creates and manages a storage volume in an OPC identity domain. + +~> **Caution:** The ``opc_compute_storage_volume`` resource can completely delete your +storage volume just as easily as it can create it. To avoid costly accidents, +consider setting +[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy) +on your storage volume resources as an extra safety measure. + +## Example Usage + +``` +resource "opc_compute_storage_volume" "test_volume" { + size = "3g" + description = "My storage volume" + name = "test_volume_a" + tags = ["xyzzy", "quux"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The unique (within this identity domain) name of the storage volume. + +* `size` - (Required) The size of the storage instance. + +* `description` - (Optional) A description of the storage volume. + +* `tags` - (Optional) A list of tags to apply to the storage volume. + +* `bootableImage` - (Optional) The name of the bootable image the storage volume is loaded with. + +* `bootableImageVersion` - (Optional) The version of the bootable image specified in `bootableImage` to use. + +* `snapshot` - (Optional) The snapshot to initialise the storage volume with. This has two nested properties: `name`, +for the name of the snapshot to use, and `account` for the name of the snapshot account to use. + +* `snapshotId` - (Optional) The id of the snapshot to initialise the storage volume with. 
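+
+For example, to create a volume from a snapshot (a hedged sketch; the snapshot and
+account names are placeholders, and the block shape follows the `snapshot` schema
+described above):
+
+```
+resource "opc_compute_storage_volume" "from_snapshot" {
+    name = "restored_volume"
+    size = "3g"
+    snapshot = {
+        name = "my-snapshot"
+        account = "/Compute-mydomain/default"
+    }
+}
+```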
From 2d7be9bb9ea9fd9435aba168a8dd4cc30ab36154 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 29 Mar 2017 19:30:04 +0200 Subject: [PATCH 016/342] Hooking up the OracleOPC Docs --- website/source/assets/stylesheets/_docs.scss | 1 + .../d/opc_compute_vnic.html.markdown | 36 +++++++++++ .../providers/oracleopc/index.html.markdown | 55 +++++++++++++++++ .../r/opc_compute_instance.html.markdown | 4 +- .../opc_compute_ip_association.html.markdown | 6 +- .../opc_compute_ip_reservation.html.markdown | 6 +- ...compute_security_application.html.markdown | 8 +-- ...compute_security_association.html.markdown | 8 +-- ...opc_compute_security_ip_list.html.markdown | 8 +-- .../r/opc_compute_security_list.html.markdown | 8 +-- .../r/opc_compute_security_rule.html.markdown | 8 +-- .../r/opc_compute_ssh_key.html.markdown | 6 +- .../opc_compute_storage_volume.html.markdown | 4 +- website/source/layouts/docs.erb | 4 ++ website/source/layouts/oracleopc.erb | 59 +++++++++++++++++++ 15 files changed, 188 insertions(+), 33 deletions(-) create mode 100644 website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown create mode 100644 website/source/docs/providers/oracleopc/index.html.markdown create mode 100644 website/source/layouts/oracleopc.erb diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss index 35f16eb60..9f2922c21 100755 --- a/website/source/assets/stylesheets/_docs.scss +++ b/website/source/assets/stylesheets/_docs.scss @@ -50,6 +50,7 @@ body.layout-nomad, body.layout-ns1, body.layout-openstack, body.layout-opsgenie, +body.layout-oracleopc, body.layout-packet, body.layout-pagerduty, body.layout-postgresql, diff --git a/website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown b/website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown new file mode 100644 index 000000000..78be49c4a --- /dev/null +++ b/website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown @@ -0,0 +1,36 @@ +--- +layout: "oracleopc" +page_title: "Oracle: opc_compute_vnic" +sidebar_current: "docs-oracleopc-datasource-vnic" +description: |- + Gets information about the configuration of a Virtual NIC. +--- + +# opc\_compute\_vnic + +Use this data source to access the configuration of a Virtual NIC. + +## Example Usage + +``` +data "opc_compute_vnic" "current" {} + +output "mac_address" { + value = "${data.opc_compute_vnic.current.mac_address}" +} +``` + +## Argument Reference +* `name` is the name of the Virtual NIC. + +## Attributes Reference + +* `description` is a description of the Virtual NIC. + +* `mac_address` is the MAC Address of the Virtual NIC. + +* `tags` is a list of Tags associated with the Virtual NIC. + +* `transit_flag` is `true` if the Virtual NIC is of the type `transit`. + +* `uri` is the Unique Resource Locator of the Virtual NIC. diff --git a/website/source/docs/providers/oracleopc/index.html.markdown b/website/source/docs/providers/oracleopc/index.html.markdown new file mode 100644 index 000000000..598346919 --- /dev/null +++ b/website/source/docs/providers/oracleopc/index.html.markdown @@ -0,0 +1,55 @@ +--- +layout: "oracleopc" +page_title: "Provider: Oracle Public Cloud" +sidebar_current: "docs-oracleopc-index" +description: |- + The Oracle Public Cloud provider is used to interact with the many resources supported by the Oracle Public Cloud. The provider needs to be configured with credentials for the Oracle Public Cloud API. 
+---
+
+# Oracle Public Cloud Provider
+
+The Oracle Public Cloud provider is used to interact with the many resources supported by the Oracle Public Cloud. The provider needs to be configured with credentials for the Oracle Public Cloud API.
+
+Use the navigation to the left to read about the available resources.
+
+## Example Usage
+
+```
+# Configure the Oracle Public Cloud
+provider "oracle" {
+  user = "..."
+  password = "..."
+  identity_domain = "..."
+  endpoint = "..."
+}
+
+# Create an IP Reservation
+resource "opc_compute_ip_reservation" "production" {
+  parentpool = "/oracle/public/ippool"
+  permanent = true
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `user` - (Optional) The username to use, generally your email address. It can also
+  be sourced from the `OPC_USERNAME` environment variable.
+
+* `password` - (Optional) The password associated with the username to use. It can also be sourced from
+  the `OPC_PASSWORD` environment variable.
+
+* `identity_domain` - (Optional) The identity domain to use. It can also be sourced from
+  the `OPC_IDENTITY_DOMAIN` environment variable.
+
+* `endpoint` - (Optional) The API endpoint to use, associated with your Oracle Public Cloud account. This is known as the `REST Endpoint` within the Oracle portal. It can also be sourced from the `OPC_ENDPOINT` environment variable.
+
+* `max_retry_timeout` - (Optional) The maximum number of seconds to wait for a successful response when operating on resources within Oracle Public Cloud. It can also be sourced from the `OPC_MAX_RETRY_TIMEOUT` environment variable. Defaults to 3000 seconds.
+
+## Testing
+
+Credentials must be provided via the `OPC_USERNAME`, `OPC_PASSWORD`,
+`OPC_IDENTITY_DOMAIN` and `OPC_ENDPOINT` environment variables in order to run
+acceptance tests.
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown
index 04762b5c3..faeb3ee7c 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracle"
+layout: "oracleopc"
 page_title: "Oracle: opc_compute_instance"
-sidebar_current: "docs-opc-resource-instance"
+sidebar_current: "docs-oracleopc-resource-instance"
 description: |-
   Creates and manages an instance in an OPC identity domain.
 ---
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
index deeed76c4..2518b2df1 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
+++ b/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracle"
+layout: "oracleopc"
 page_title: "Oracle: opc_compute_ip_association"
-sidebar_current: "docs-opc-resource-ip-association"
+sidebar_current: "docs-oracleopc-resource-ip-association"
 description: |-
   Creates and manages an IP association in an OPC identity domain.
 ---
@@ -28,4 +28,4 @@ The following arguments are supported:
 
 * `parentpool` - (Required) The pool from which to take an IP address. To associate a specific reserved IP address, use
 the prefix `ipreservation:` followed by the name of the IP reservation. 
To allocate an IP address from a pool, use the -prefix `ippool:`, e.g. `ippool:/oracle/public/ippool`. \ No newline at end of file +prefix `ippool:`, e.g. `ippool:/oracle/public/ippool`. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown index 7c44c62ea..44b70cc0f 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown @@ -1,7 +1,7 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_ip_reservation" -sidebar_current: "docs-opc-resource-ip-assocation" +sidebar_current: "docs-oracleopc-resource-ip-reservation" description: |- Creates and manages an IP reservation in an OPC identity domain. --- @@ -30,4 +30,4 @@ The following arguments are supported: (if true), or may be returned to the pool and replaced with a different IP address when an instance is restarted, or deleted and recreated (if false). -* `tags` - (Optional) List of tags that may be applied to the IP reservation. \ No newline at end of file +* `tags` - (Optional) List of tags that may be applied to the IP reservation. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown index fe8c9ba3c..94760f082 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown @@ -1,12 +1,12 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_security_application" -sidebar_current: "docs-opc-resource-security-application" +sidebar_current: "docs-oracleopc-resource-security-application" description: |- Creates and manages a security application in an OPC identity domain. --- -# opc\_compute\_ip\_reservation +# opc\_compute\_security\_application The ``opc_compute_security_application`` resource creates and manages a security application in an OPC identity domain. @@ -36,4 +36,4 @@ the corresponding integer in the range 0-254 from the list of [assigned protocol `echo`, `reply`, `ttl`, `traceroute`, `unreachable`. * `icmpcode` - (Optional) The ICMP code to enable for this application, if the `protocol` is `icmp`. Must be one of -`network`, `host`, `protocol`, `port`, `df`, `admin`. \ No newline at end of file +`network`, `host`, `protocol`, `port`, `df`, `admin`. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown index 170acc2ea..49207c879 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown @@ -1,12 +1,12 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_security_association" -sidebar_current: "docs-opc-resource-security-association" +sidebar_current: "docs-oracleopc-resource-security-association" description: |- Creates and manages a security association in an OPC identity domain. 
--- -# opc\_compute\_ip\_reservation +# opc\_compute\_security\_association The ``opc_compute_security_association`` resource creates and manages an association between an instance and a security list in an OPC identity domain. @@ -26,4 +26,4 @@ The following arguments are supported: * `vcable` - (Required) The `vcable` of the instance to associate to the security list. -* `seclist` - (Required) The name of the security list to associate the instance to. \ No newline at end of file +* `seclist` - (Required) The name of the security list to associate the instance to. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown index bded4c30e..62f40d839 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown @@ -1,12 +1,12 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_security_ip_list" -sidebar_current: "docs-opc-resource-security-list" +sidebar_current: "docs-oracleopc-resource-security-ip-list" description: |- Creates and manages a security IP list in an OPC identity domain. --- -# opc\_compute\_ip\_reservation +# opc\_compute\_security\_ip\_list The ``opc_compute_security_ip_list`` resource creates and manages a security IP list in an OPC identity domain. @@ -25,4 +25,4 @@ The following arguments are supported: * `name` - (Required) The unique (within the identity domain) name of the security IP list. -* `ip_entries` - (Required) The IP addresses to include in the list. \ No newline at end of file +* `ip_entries` - (Required) The IP addresses to include in the list. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown index 7da5e5668..64547a41e 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown @@ -1,12 +1,12 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_security_list" -sidebar_current: "docs-opc-resource-security-list" +sidebar_current: "docs-oracleopc-resource-security-list" description: |- Creates and manages a security list in an OPC identity domain. --- -# opc\_compute\_ip\_reservation +# opc\_compute\_security\_list The ``opc_compute_security_list`` resource creates and manages a security list in an OPC identity domain. @@ -30,4 +30,4 @@ The following arguments are supported: `reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent). * `output_cidr_policy` - (Required) The policy for outbound traffic from the security list.Must be one of `permit`, -`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent). \ No newline at end of file +`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent). 
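The security-list and security-association documents above show only the changed hunks, so a minimal usage sketch of how the two resources combine may help. This is not part of the patch: the resource names are illustrative, the `name` and inbound `policy` arguments are assumed from the portion of the security-list document outside the hunk, and `opc_compute_instance.test` stands in for an instance defined elsewhere.

```
# A security list using the documented policies: permit inbound
# traffic, deny outbound traffic (output_cidr_policy).
resource "opc_compute_security_list" "sec_list1" {
  name               = "sec-list-1"
  policy             = "permit"
  output_cidr_policy = "deny"
}

# Associate an instance's vcable with the list, per the
# opc_compute_security_association arguments above.
resource "opc_compute_security_association" "test_assoc" {
  vcable  = "${opc_compute_instance.test.vcable}"
  seclist = "${opc_compute_security_list.sec_list1.name}"
}
```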
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown index 02c4b7533..6497b0265 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown @@ -1,7 +1,7 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_security_rule" -sidebar_current: "docs-opc-resource-security-rule" +sidebar_current: "docs-oracleopc-resource-security-rule" description: |- Creates and manages a security rule in an OPC identity domain. --- @@ -33,7 +33,7 @@ The following arguments are supported: * `source_list` - (Required) The source security list (prefixed with `seclist:`), or security IP list (prefixed with `seciplist:`). - + * `destination_list` - (Required) The destination security list (prefixed with `seclist:`), or security IP list (prefixed with `seciplist:`). @@ -43,4 +43,4 @@ The following arguments are supported: be `permit`. * `disabled` - (Required) Whether to disable this security rule. This is useful if you want to temporarily disable a rule -without removing it outright from your Terraform resource definition. \ No newline at end of file +without removing it outright from your Terraform resource definition. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown index 9655653a9..ff85467d8 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown @@ -1,7 +1,7 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_ssh_key" -sidebar_current: "docs-opc-resource-instance" +sidebar_current: "docs-oracleopc-resource-ssh-key" description: |- Creates and manages an SSH key in an OPC identity domain. --- @@ -29,4 +29,4 @@ The following arguments are supported: * `key` - (Required) The SSH key itself * `enabled` - (Required) Whether or not the key is enabled. This is useful if you want to temporarily disable an SSH key, -without removing it entirely from your Terraform resource definition. \ No newline at end of file +without removing it entirely from your Terraform resource definition. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown index 0e91a8ad7..4b30b59ed 100644 --- a/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown +++ b/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown @@ -1,7 +1,7 @@ --- -layout: "oracle" +layout: "oracleopc" page_title: "Oracle: opc_compute_storage_volume" -sidebar_current: "docs-opc-resource-storage_volume" +sidebar_current: "docs-oracleopc-resource-storage-volume" description: |- Creates and manages a storage volume in an OPC identity domain. 
--- diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 1f42c1e32..77d5bf2c3 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -355,6 +355,10 @@ OpsGenie + > + Oracle OPC + + > Packet diff --git a/website/source/layouts/oracleopc.erb b/website/source/layouts/oracleopc.erb new file mode 100644 index 000000000..a9d9579f8 --- /dev/null +++ b/website/source/layouts/oracleopc.erb @@ -0,0 +1,59 @@ +<% wrap_layout :inner do %> +<% content_for :sidebar do %> + +<% end %> + +<%= yield %> +<% end %> From edc524df55965148e34ce21d6a15f5aeed4a5e26 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Mon, 3 Apr 2017 18:24:53 -0400 Subject: [PATCH 017/342] provider/opc: Update OPC Provider Updates the OPC provider to a fully working version. --- builtin/bins/provider-opc/main.go | 12 + builtin/providers/opc/config.go | 41 + .../opc/data_source_network_interface.go | 176 ++++ .../opc/data_source_network_interface_test.go | 103 ++ .../providers/opc/data_source_virtual_nic.go | 72 ++ .../opc/data_source_virtual_nic_test.go | 56 ++ builtin/providers/opc/helpers.go | 47 + builtin/providers/opc/import_acl_test.go | 58 ++ .../providers/opc/import_image_list_test.go | 57 ++ builtin/providers/opc/import_instance_test.go | 35 + .../opc/import_ip_address_prefix_set_test.go | 33 + .../opc/import_ip_address_reservation_test.go | 57 ++ .../opc/import_ip_association_test.go | 34 + .../opc/import_ip_network_exchange_test.go | 34 + .../opc/import_ip_reservation_test.go | 34 + builtin/providers/opc/import_route_test.go | 33 + builtin/providers/opc/import_sec_rule_test.go | 59 ++ .../opc/import_security_application_test.go | 59 ++ .../opc/import_security_association_test.go | 59 ++ .../opc/import_security_ip_list_test.go | 34 + .../opc/import_security_list_test.go | 59 ++ .../opc/import_security_protocol_test.go | 58 ++ .../opc/import_security_rule_test.go | 58 ++ builtin/providers/opc/import_ssh_key_test.go | 59 ++ builtin/providers/opc/provider.go | 89 ++ .../{oracleopc => opc}/provider_test.go | 7 +- builtin/providers/opc/resource_acl.go | 151 +++ builtin/providers/opc/resource_acl_test.go | 107 +++ builtin/providers/opc/resource_image_list.go | 107 +++ .../providers/opc/resource_image_list_test.go | 98 ++ builtin/providers/opc/resource_instance.go | 884 ++++++++++++++++++ .../providers/opc/resource_instance_test.go | 229 +++++ .../opc/resource_ip_address_prefix_set.go | 147 +++ .../resource_ip_address_prefix_set_test.go | 100 ++ .../opc/resource_ip_address_reservation.go | 137 +++ .../resource_ip_address_reservation_test.go | 77 ++ .../providers/opc/resource_ip_association.go | 96 ++ .../opc/resource_ip_association_test.go | 88 ++ builtin/providers/opc/resource_ip_network.go | 176 ++++ .../opc/resource_ip_network_exchange.go | 105 +++ .../opc/resource_ip_network_exchange_test.go | 73 ++ .../providers/opc/resource_ip_network_test.go | 91 ++ .../providers/opc/resource_ip_reservation.go | 117 +++ .../opc/resource_ip_reservation_test.go | 75 ++ builtin/providers/opc/resource_route.go | 176 ++++ builtin/providers/opc/resource_route_test.go | 115 +++ builtin/providers/opc/resource_sec_rule.go | 163 ++++ .../providers/opc/resource_sec_rule_test.go | 138 +++ .../opc/resource_security_application.go | 140 +++ .../opc/resource_security_application_test.go | 101 ++ .../opc/resource_security_association.go | 101 ++ .../opc/resource_security_association_test.go | 128 +++ .../opc/resource_security_ip_list.go | 135 +++ .../opc/resource_security_ip_list_test.go | 117 
+++ .../providers/opc/resource_security_list.go | 141 +++ .../opc/resource_security_list_test.go | 100 ++ .../opc/resource_security_protocol.go | 159 ++++ .../opc/resource_security_protocol_test.go | 129 +++ .../providers/opc/resource_security_rule.go | 231 +++++ .../opc/resource_security_rule_test.go | 159 ++++ builtin/providers/opc/resource_ssh_key.go | 118 +++ .../providers/opc/resource_ssh_key_test.go | 152 +++ .../providers/opc/resource_storage_volume.go | 250 +++++ .../opc/resource_storage_volume_test.go | 206 ++++ builtin/providers/opc/resource_vnic_set.go | 169 ++++ .../providers/opc/resource_vnic_set_test.go | 113 +++ builtin/providers/opc/tags.go | 28 + builtin/providers/opc/validators.go | 67 ++ builtin/providers/opc/validators_test.go | 102 ++ builtin/providers/oracleopc/config.go | 47 - builtin/providers/oracleopc/provider.go | 75 -- .../providers/oracleopc/resource_instance.go | 306 ------ .../oracleopc/resource_instance_test.go | 156 ---- .../oracleopc/resource_ip_association.go | 103 -- .../oracleopc/resource_ip_association_test.go | 74 -- .../oracleopc/resource_ip_reservation.go | 122 --- .../resource_security_application.go | 124 --- .../resource_security_association.go | 103 -- .../resource_security_association_test.go | 75 -- .../oracleopc/resource_security_ip_list.go | 117 --- .../oracleopc/resource_security_list.go | 119 --- .../oracleopc/resource_security_rule.go | 143 --- .../oracleopc/resource_security_rule_test.go | 85 -- .../providers/oracleopc/resource_ssh_key.go | 117 --- .../oracleopc/resource_storage_volume.go | 301 ------ .../oracleopc/resource_storage_volume_test.go | 70 -- command/internal_plugin_list.go | 2 + .../hashicorp/go-oracle-terraform/LICENSE | 373 ++++++++ .../go-oracle-terraform/compute/acl.go | 138 +++ .../compute/authentication.go | 34 + .../go-oracle-terraform/compute/client.go | 238 +++++ .../go-oracle-terraform/compute/image_list.go | 154 +++ .../compute/image_list_entries.go | 122 +++ .../go-oracle-terraform/compute/instances.go | 540 +++++++++++ .../compute/ip_address_prefix_set.go | 135 +++ .../compute/ip_address_reservations.go | 190 ++++ .../compute/ip_associations.go | 118 +++ .../compute/ip_network_exchange.go | 99 ++ .../compute/ip_networks.go | 186 ++++ .../compute/ip_reservations.go | 147 +++ .../go-oracle-terraform/compute/logging.go | 28 + .../compute/resource_client.go | 94 ++ .../go-oracle-terraform/compute/routes.go | 153 +++ .../go-oracle-terraform/compute/sec_rules.go | 193 ++++ .../compute/security_applications.go | 150 +++ .../compute/security_associations.go | 95 ++ .../compute/security_ip_lists.go | 113 +++ .../compute/security_lists.go | 131 +++ .../compute/security_protocols.go | 187 ++++ .../compute/security_rules.go | 266 ++++++ .../go-oracle-terraform/compute/ssh_keys.go | 112 +++ .../compute/storage_volume_attachments.go | 158 ++++ .../compute/storage_volumes.go | 345 +++++++ .../go-oracle-terraform/compute/test_utils.go | 121 +++ .../compute/virtual_nic.go | 52 ++ .../compute/virtual_nic_sets.go | 154 +++ .../go-oracle-terraform/helper/testing.go | 44 + .../go-oracle-terraform/opc/config.go | 21 + .../go-oracle-terraform/opc/convert.go | 5 + .../go-oracle-terraform/opc/errors.go | 12 + .../go-oracle-terraform/opc/logger.go | 70 ++ vendor/vendor.json | 18 + website/source/assets/stylesheets/_docs.scss | 2 +- ...pc_compute_network_interface.html.markdown | 49 + .../d/opc_compute_vnic.html.markdown | 8 +- .../{oracleopc => opc}/index.html.markdown | 6 +- .../opc/r/opc_compute_acl.html.markdown | 45 + 
.../r/opc_compute_image_list.html.markdown | 39 + .../opc/r/opc_compute_instance.html.markdown | 169 ++++ ...ompute_ip_address_prefix_set.html.markdown | 45 + ...mpute_ip_address_reservation.html.markdown | 38 + .../opc_compute_ip_association.html.markdown | 19 +- .../r/opc_compute_ip_network.html.markdown | 54 ++ ..._compute_ip_network_exchange.html.markdown | 37 + .../opc_compute_ip_reservation.html.markdown | 16 +- .../opc/r/opc_compute_route.html.markdown | 60 ++ .../r/opc_compute_sec_rule.html.markdown} | 35 +- ...compute_security_application.html.markdown | 38 +- ...compute_security_association.html.markdown | 19 +- ...opc_compute_security_ip_list.html.markdown | 14 +- .../r/opc_compute_security_list.html.markdown | 22 +- ...pc_compute_security_protocol.html.markdown | 65 ++ .../r/opc_compute_security_rule.html.markdown | 62 ++ .../r/opc_compute_ssh_key.html.markdown | 16 +- .../opc_compute_storage_volume.html.markdown | 78 ++ .../opc/r/opc_compute_vnic_set.html.markdown | 45 + .../r/opc_compute_instance.html.markdown | 68 -- .../opc_compute_storage_volume.html.markdown | 49 - website/source/layouts/docs.erb | 4 +- website/source/layouts/opc.erb | 94 ++ website/source/layouts/oracleopc.erb | 59 -- 151 files changed, 13739 insertions(+), 2370 deletions(-) create mode 100644 builtin/bins/provider-opc/main.go create mode 100644 builtin/providers/opc/config.go create mode 100644 builtin/providers/opc/data_source_network_interface.go create mode 100644 builtin/providers/opc/data_source_network_interface_test.go create mode 100644 builtin/providers/opc/data_source_virtual_nic.go create mode 100644 builtin/providers/opc/data_source_virtual_nic_test.go create mode 100644 builtin/providers/opc/helpers.go create mode 100644 builtin/providers/opc/import_acl_test.go create mode 100644 builtin/providers/opc/import_image_list_test.go create mode 100644 builtin/providers/opc/import_instance_test.go create mode 100644 builtin/providers/opc/import_ip_address_prefix_set_test.go create mode 100644 builtin/providers/opc/import_ip_address_reservation_test.go create mode 100644 builtin/providers/opc/import_ip_association_test.go create mode 100644 builtin/providers/opc/import_ip_network_exchange_test.go create mode 100644 builtin/providers/opc/import_ip_reservation_test.go create mode 100644 builtin/providers/opc/import_route_test.go create mode 100644 builtin/providers/opc/import_sec_rule_test.go create mode 100644 builtin/providers/opc/import_security_application_test.go create mode 100644 builtin/providers/opc/import_security_association_test.go create mode 100644 builtin/providers/opc/import_security_ip_list_test.go create mode 100644 builtin/providers/opc/import_security_list_test.go create mode 100644 builtin/providers/opc/import_security_protocol_test.go create mode 100644 builtin/providers/opc/import_security_rule_test.go create mode 100644 builtin/providers/opc/import_ssh_key_test.go create mode 100644 builtin/providers/opc/provider.go rename builtin/providers/{oracleopc => opc}/provider_test.go (91%) create mode 100644 builtin/providers/opc/resource_acl.go create mode 100644 builtin/providers/opc/resource_acl_test.go create mode 100644 builtin/providers/opc/resource_image_list.go create mode 100644 builtin/providers/opc/resource_image_list_test.go create mode 100644 builtin/providers/opc/resource_instance.go create mode 100644 builtin/providers/opc/resource_instance_test.go create mode 100644 builtin/providers/opc/resource_ip_address_prefix_set.go create mode 100644 
builtin/providers/opc/resource_ip_address_prefix_set_test.go create mode 100644 builtin/providers/opc/resource_ip_address_reservation.go create mode 100644 builtin/providers/opc/resource_ip_address_reservation_test.go create mode 100644 builtin/providers/opc/resource_ip_association.go create mode 100644 builtin/providers/opc/resource_ip_association_test.go create mode 100644 builtin/providers/opc/resource_ip_network.go create mode 100644 builtin/providers/opc/resource_ip_network_exchange.go create mode 100644 builtin/providers/opc/resource_ip_network_exchange_test.go create mode 100644 builtin/providers/opc/resource_ip_network_test.go create mode 100644 builtin/providers/opc/resource_ip_reservation.go create mode 100644 builtin/providers/opc/resource_ip_reservation_test.go create mode 100644 builtin/providers/opc/resource_route.go create mode 100644 builtin/providers/opc/resource_route_test.go create mode 100644 builtin/providers/opc/resource_sec_rule.go create mode 100644 builtin/providers/opc/resource_sec_rule_test.go create mode 100644 builtin/providers/opc/resource_security_application.go create mode 100644 builtin/providers/opc/resource_security_application_test.go create mode 100644 builtin/providers/opc/resource_security_association.go create mode 100644 builtin/providers/opc/resource_security_association_test.go create mode 100644 builtin/providers/opc/resource_security_ip_list.go create mode 100644 builtin/providers/opc/resource_security_ip_list_test.go create mode 100644 builtin/providers/opc/resource_security_list.go create mode 100644 builtin/providers/opc/resource_security_list_test.go create mode 100644 builtin/providers/opc/resource_security_protocol.go create mode 100644 builtin/providers/opc/resource_security_protocol_test.go create mode 100644 builtin/providers/opc/resource_security_rule.go create mode 100644 builtin/providers/opc/resource_security_rule_test.go create mode 100644 builtin/providers/opc/resource_ssh_key.go create mode 100644 builtin/providers/opc/resource_ssh_key_test.go create mode 100644 builtin/providers/opc/resource_storage_volume.go create mode 100644 builtin/providers/opc/resource_storage_volume_test.go create mode 100644 builtin/providers/opc/resource_vnic_set.go create mode 100644 builtin/providers/opc/resource_vnic_set_test.go create mode 100644 builtin/providers/opc/tags.go create mode 100644 builtin/providers/opc/validators.go create mode 100644 builtin/providers/opc/validators_test.go delete mode 100644 builtin/providers/oracleopc/config.go delete mode 100644 builtin/providers/oracleopc/provider.go delete mode 100644 builtin/providers/oracleopc/resource_instance.go delete mode 100644 builtin/providers/oracleopc/resource_instance_test.go delete mode 100644 builtin/providers/oracleopc/resource_ip_association.go delete mode 100644 builtin/providers/oracleopc/resource_ip_association_test.go delete mode 100644 builtin/providers/oracleopc/resource_ip_reservation.go delete mode 100644 builtin/providers/oracleopc/resource_security_application.go delete mode 100644 builtin/providers/oracleopc/resource_security_association.go delete mode 100644 builtin/providers/oracleopc/resource_security_association_test.go delete mode 100644 builtin/providers/oracleopc/resource_security_ip_list.go delete mode 100644 builtin/providers/oracleopc/resource_security_list.go delete mode 100644 builtin/providers/oracleopc/resource_security_rule.go delete mode 100644 builtin/providers/oracleopc/resource_security_rule_test.go delete mode 100644 
builtin/providers/oracleopc/resource_ssh_key.go delete mode 100644 builtin/providers/oracleopc/resource_storage_volume.go delete mode 100644 builtin/providers/oracleopc/resource_storage_volume_test.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/LICENSE create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/acl.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/authentication.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/client.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list_entries.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_prefix_set.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_reservations.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_associations.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_network_exchange.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_networks.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_reservations.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/logging.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/resource_client.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/routes.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/sec_rules.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/security_applications.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/security_associations.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/security_ip_lists.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/security_protocols.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/security_rules.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/test_utils.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic_sets.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/helper/testing.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/opc/config.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/opc/convert.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/opc/errors.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/opc/logger.go create mode 100644 website/source/docs/providers/opc/d/opc_compute_network_interface.html.markdown rename website/source/docs/providers/{oracleopc => opc}/d/opc_compute_vnic.html.markdown (85%) rename website/source/docs/providers/{oracleopc => 
opc}/index.html.markdown (96%) create mode 100644 website/source/docs/providers/opc/r/opc_compute_acl.html.markdown create mode 100644 website/source/docs/providers/opc/r/opc_compute_image_list.html.markdown create mode 100644 website/source/docs/providers/opc/r/opc_compute_instance.html.markdown create mode 100644 website/source/docs/providers/opc/r/opc_compute_ip_address_prefix_set.html.markdown create mode 100644 website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown rename website/source/docs/providers/{oracleopc => opc}/r/opc_compute_ip_association.html.markdown (75%) create mode 100644 website/source/docs/providers/opc/r/opc_compute_ip_network.html.markdown create mode 100644 website/source/docs/providers/opc/r/opc_compute_ip_network_exchange.html.markdown rename website/source/docs/providers/{oracleopc => opc}/r/opc_compute_ip_reservation.html.markdown (70%) create mode 100644 website/source/docs/providers/opc/r/opc_compute_route.html.markdown rename website/source/docs/providers/{oracleopc/r/opc_compute_security_rule.html.markdown => opc/r/opc_compute_sec_rule.html.markdown} (54%) rename website/source/docs/providers/{oracleopc => opc}/r/opc_compute_security_application.html.markdown (55%) rename website/source/docs/providers/{oracleopc => opc}/r/opc_compute_security_association.html.markdown (55%) rename website/source/docs/providers/{oracleopc => opc}/r/opc_compute_security_ip_list.html.markdown (71%) rename website/source/docs/providers/{oracleopc => opc}/r/opc_compute_security_list.html.markdown (68%) create mode 100644 website/source/docs/providers/opc/r/opc_compute_security_protocol.html.markdown create mode 100644 website/source/docs/providers/opc/r/opc_compute_security_rule.html.markdown rename website/source/docs/providers/{oracleopc => opc}/r/opc_compute_ssh_key.html.markdown (72%) create mode 100644 website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown create mode 100644 website/source/docs/providers/opc/r/opc_compute_vnic_set.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown create mode 100644 website/source/layouts/opc.erb delete mode 100644 website/source/layouts/oracleopc.erb diff --git a/builtin/bins/provider-opc/main.go b/builtin/bins/provider-opc/main.go new file mode 100644 index 000000000..814a0b732 --- /dev/null +++ b/builtin/bins/provider-opc/main.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/opc" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: opc.Provider, + }) +} diff --git a/builtin/providers/opc/config.go b/builtin/providers/opc/config.go new file mode 100644 index 000000000..5c473e591 --- /dev/null +++ b/builtin/providers/opc/config.go @@ -0,0 +1,41 @@ +package opc + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/go-oracle-terraform/opc" +) + +type Config struct { + User string + Password string + IdentityDomain string + Endpoint string + MaxRetryTimeout int +} + +type OPCClient struct { + Client *compute.Client + MaxRetryTimeout int +} + +func (c *Config) Client() (*compute.Client, error) { + u, err := url.ParseRequestURI(c.Endpoint) + if err != nil { + return nil, fmt.Errorf("Invalid endpoint URI: %s", err) + } + + config := opc.Config{ + IdentityDomain: 
&c.IdentityDomain,
+		Username:       &c.User,
+		Password:       &c.Password,
+		APIEndpoint:    u,
+		HTTPClient:     http.DefaultClient,
+	}
+
+	// TODO: http client wrapping / log level
+	return compute.NewComputeClient(&config)
+}
diff --git a/builtin/providers/opc/data_source_network_interface.go b/builtin/providers/opc/data_source_network_interface.go
new file mode 100644
index 000000000..5a5b8fa78
--- /dev/null
+++ b/builtin/providers/opc/data_source_network_interface.go
@@ -0,0 +1,176 @@
+package opc
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/go-oracle-terraform/compute"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceNetworkInterface() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceNetworkInterfaceRead,
+
+		Schema: map[string]*schema.Schema{
+			"instance_id": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"instance_name": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"interface": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			// Computed Values returned from the data source lookup
+			"dns": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+
+			"ip_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"ip_network": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"mac_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"model": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"name_servers": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+
+			"nat": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+
+			"search_domains": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+
+			"sec_lists": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+
+			"shared_network": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+
+			"vnic": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"vnic_sets": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+		},
+	}
+}
+
+func dataSourceNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*compute.Client).Instances()
+
+	// Get required attributes
+	instance_name := d.Get("instance_name").(string)
+	instance_id := d.Get("instance_id").(string)
+	targetInterface := d.Get("interface").(string)
+
+	// Get instance
+	input := &compute.GetInstanceInput{
+		Name: instance_name,
+		ID:   instance_id,
+	}
+
+	instance, err := client.GetInstance(input)
+	if err != nil {
+		if compute.WasNotFoundError(err) {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error reading instance %q: %v", instance_name, err)
+	}
+
+	// If the target instance has no network interfaces, return
+	if instance.Networking == nil {
+		d.SetId("")
+		return nil
+	}
+
+	// Set the computed fields. The comma-ok form is required to detect a
+	// missing interface: a plain map index always yields a (zero) value,
+	// and the address of a local variable is never nil, so comparing
+	// &result against nil can never fire.
+	result, ok := instance.Networking[targetInterface]
+	if !ok {
+		log.Printf("[WARN] %q networking interface not found on instance %q", targetInterface, instance_name)
+	}
+
+	d.SetId(fmt.Sprintf("%s-%s", instance_name, targetInterface))
+
+	// vNIC is a required field for an IP Network interface, and can only be set if the network
+	// interface is inside an IP Network.
Use this key to determine shared_network status + if result.Vnic != "" { + d.Set("shared_network", false) + } else { + d.Set("shared_network", true) + } + + d.Set("ip_address", result.IPAddress) + d.Set("ip_network", result.IPNetwork) + d.Set("mac_address", result.MACAddress) + d.Set("model", result.Model) + d.Set("vnic", result.Vnic) + + if err := setStringList(d, "dns", result.DNS); err != nil { + return err + } + if err := setStringList(d, "name_servers", result.NameServers); err != nil { + return err + } + if err := setStringList(d, "nat", result.Nat); err != nil { + return err + } + if err := setStringList(d, "search_domains", result.SearchDomains); err != nil { + return err + } + if err := setStringList(d, "sec_lists", result.SecLists); err != nil { + return err + } + if err := setStringList(d, "vnic_sets", result.VnicSets); err != nil { + return err + } + + return nil +} diff --git a/builtin/providers/opc/data_source_network_interface_test.go b/builtin/providers/opc/data_source_network_interface_test.go new file mode 100644 index 000000000..392748980 --- /dev/null +++ b/builtin/providers/opc/data_source_network_interface_test.go @@ -0,0 +1,103 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCDataSourceNetworkInterface_basic(t *testing.T) { + rInt := acctest.RandInt() + resName := "data.opc_compute_network_interface.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceNetworkInterfaceBasic(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resName, "ip_network", fmt.Sprintf("testing-ip-network-%d", rInt)), + resource.TestCheckResourceAttr(resName, "vnic", fmt.Sprintf("ip-network-test-%d", rInt)), + resource.TestCheckResourceAttr(resName, "shared_network", "false"), + ), + }, + }, + }) +} + +func TestAccOPCDataSourceNetworkInterface_sharedNetwork(t *testing.T) { + rInt := acctest.RandInt() + resName := "data.opc_compute_network_interface.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceNetworkInterfaceShared(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resName, "model", "e1000"), + resource.TestCheckResourceAttr(resName, "nat.#", "1"), + resource.TestCheckResourceAttr(resName, "shared_network", "true"), + resource.TestCheckResourceAttr(resName, "sec_lists.#", "1"), + resource.TestCheckResourceAttr(resName, "name_servers.#", "0"), + resource.TestCheckResourceAttr(resName, "vnic_sets.#", "0"), + ), + }, + }, + }) +} + +func testAccDataSourceNetworkInterfaceBasic(rInt int) string { + return fmt.Sprintf(` +resource "opc_compute_ip_network" "foo" { + name = "testing-ip-network-%d" + description = "testing-ip-network-instance" + ip_address_prefix = "10.1.12.0/24" +} + +resource "opc_compute_instance" "test" { + name = "test-%d" + label = "test" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + networking_info { + index = 0 + ip_network = "${opc_compute_ip_network.foo.id}" + vnic = "ip-network-test-%d" + shared_network = false + } +} + +data "opc_compute_network_interface" "test" { + instance_name = "${opc_compute_instance.test.name}" + instance_id = "${opc_compute_instance.test.id}" + interface = "eth0" +}`, rInt, rInt, 
rInt) +} + +func testAccDataSourceNetworkInterfaceShared(rInt int) string { + return fmt.Sprintf(` +resource "opc_compute_instance" "test" { + name = "test-%d" + label = "test" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + tags = ["tag1", "tag2"] + networking_info { + index = 0 + model = "e1000" + nat = ["ippool:/oracle/public/ippool"] + shared_network = true + } +} + +data "opc_compute_network_interface" "test" { + instance_name = "${opc_compute_instance.test.name}" + instance_id = "${opc_compute_instance.test.id}" + interface = "eth0" +}`, rInt) +} diff --git a/builtin/providers/opc/data_source_virtual_nic.go b/builtin/providers/opc/data_source_virtual_nic.go new file mode 100644 index 000000000..2878cf489 --- /dev/null +++ b/builtin/providers/opc/data_source_virtual_nic.go @@ -0,0 +1,72 @@ +package opc + +import ( + "fmt" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceVNIC() *schema.Resource { + return &schema.Resource{ + Read: dataSourceVNICRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "description": { + Type: schema.TypeString, + Computed: true, + }, + + "mac_address": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsComputedSchema(), + + "transit_flag": { + Type: schema.TypeBool, + Computed: true, + }, + + "uri": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceVNICRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).VirtNICs() + + name := d.Get("name").(string) + + input := &compute.GetVirtualNICInput{ + Name: name, + } + + vnic, err := client.GetVirtualNIC(input) + if err != nil { + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading vnic %s: %s", name, err) + } + + d.SetId(name) + d.Set("description", vnic.Description) + d.Set("mac_address", vnic.MACAddress) + d.Set("transit_flag", vnic.TransitFlag) + d.Set("uri", vnic.Uri) + if err := setStringList(d, "tags", vnic.Tags); err != nil { + return err + } + return nil +} diff --git a/builtin/providers/opc/data_source_virtual_nic_test.go b/builtin/providers/opc/data_source_virtual_nic_test.go new file mode 100644 index 000000000..6c52c06a3 --- /dev/null +++ b/builtin/providers/opc/data_source_virtual_nic_test.go @@ -0,0 +1,56 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCVNIC_Basic(t *testing.T) { + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccVnicBasic(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "data.opc_compute_vnic.foo", "mac_address", "02:5a:cd:ec:2e:4c"), + resource.TestCheckResourceAttr( + "data.opc_compute_vnic.foo", "transit_flag", "false"), + ), + }, + }, + }) +} + +func testAccVnicBasic(rInt int) string { + return fmt.Sprintf(` +resource "opc_compute_ip_network" "foo" { + name = "testing-vnic-data-%d" + description = "testing-vnic-data" + ip_address_prefix = "10.1.13.0/24" +} + +resource "opc_compute_instance" "test" { + name = "test-%d" + label = "test" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + networking_info { + index = 0 + ip_network = 
"${opc_compute_ip_network.foo.id}" + vnic = "test-vnic-data-%d" + shared_network = false + mac_address = "02:5a:cd:ec:2e:4c" + } +} + +data "opc_compute_vnic" "foo" { + name = "test-vnic-data-%d" +}`, rInt, rInt, rInt, rInt) +} diff --git a/builtin/providers/opc/helpers.go b/builtin/providers/opc/helpers.go new file mode 100644 index 000000000..ea0123c96 --- /dev/null +++ b/builtin/providers/opc/helpers.go @@ -0,0 +1,47 @@ +package opc + +import ( + "sort" + + "github.com/hashicorp/terraform/helper/schema" +) + +// Helper function to get a string list from the schema, and alpha-sort it +func getStringList(d *schema.ResourceData, key string) []string { + if _, ok := d.GetOk(key); !ok { + return nil + } + l := d.Get(key).([]interface{}) + res := make([]string, len(l)) + for i, v := range l { + res[i] = v.(string) + } + sort.Strings(res) + return res +} + +// Helper function to set a string list in the schema, in an alpha-sorted order. +func setStringList(d *schema.ResourceData, key string, value []string) error { + sort.Strings(value) + return d.Set(key, value) +} + +// Helper function to get an int list from the schema, and numerically sort it +func getIntList(d *schema.ResourceData, key string) []int { + if _, ok := d.GetOk(key); !ok { + return nil + } + + l := d.Get(key).([]interface{}) + res := make([]int, len(l)) + for i, v := range l { + res[i] = v.(int) + } + sort.Ints(res) + return res +} + +func setIntList(d *schema.ResourceData, key string, value []int) error { + sort.Ints(value) + return d.Set(key, value) +} diff --git a/builtin/providers/opc/import_acl_test.go b/builtin/providers/opc/import_acl_test.go new file mode 100644 index 000000000..16b49fa93 --- /dev/null +++ b/builtin/providers/opc/import_acl_test.go @@ -0,0 +1,58 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCACL_importBasic(t *testing.T) { + resourceName := "opc_compute_acl.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccACLBasic, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckACLDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccOPCACL_importDisabled(t *testing.T) { + resourceName := "opc_compute_acl.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccACLDisabled, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckACLDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_image_list_test.go b/builtin/providers/opc/import_image_list_test.go new file mode 100644 index 000000000..19877f95a --- /dev/null +++ b/builtin/providers/opc/import_image_list_test.go @@ -0,0 +1,57 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCImageList_importBasic(t *testing.T) { + resourceName := "opc_compute_image_list.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccImageList_basic, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + 
Providers: testAccProviders, + CheckDestroy: testAccCheckImageListDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccOPCImageList_importComplete(t *testing.T) { + resourceName := "opc_compute_image_list.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccImageList_complete, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckImageListDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_instance_test.go b/builtin/providers/opc/import_instance_test.go new file mode 100644 index 000000000..e3bbcc32c --- /dev/null +++ b/builtin/providers/opc/import_instance_test.go @@ -0,0 +1,35 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCInstance_importBasic(t *testing.T) { + rInt := acctest.RandInt() + + resourceName := "opc_compute_instance.test" + instanceName := fmt.Sprintf("acc-test-instance-%d", rInt) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccInstanceBasic(rInt), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdPrefix: instanceName + "/", + }, + }, + }) +} diff --git a/builtin/providers/opc/import_ip_address_prefix_set_test.go b/builtin/providers/opc/import_ip_address_prefix_set_test.go new file mode 100644 index 000000000..09c53d845 --- /dev/null +++ b/builtin/providers/opc/import_ip_address_prefix_set_test.go @@ -0,0 +1,33 @@ +package opc + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCIPAddressPrefixSet_importBasic(t *testing.T) { + resourceName := "opc_compute_ip_address_prefix_set.test" + + ri := acctest.RandInt() + config := testAccIPAddressPrefixSetBasic(ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIPAddressPrefixSetDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_ip_address_reservation_test.go b/builtin/providers/opc/import_ip_address_reservation_test.go new file mode 100644 index 000000000..bc67afa16 --- /dev/null +++ b/builtin/providers/opc/import_ip_address_reservation_test.go @@ -0,0 +1,57 @@ +package opc + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCIPAddressReservation_importBasic(t *testing.T) { + resourceName := "opc_compute_ip_address_reservation.test" + + ri := acctest.RandInt() + config := testAccOPCIPAddressReservationConfig_Basic(ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckIPAddressReservationDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccOPCIPAddressReservation_importDisabled(t *testing.T) { + resourceName := "opc_compute_ip_address_reservation.test" + + ri := acctest.RandInt() + config := testAccOPCIPAddressReservationConfig_Basic(ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckIPAddressReservationDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_ip_association_test.go b/builtin/providers/opc/import_ip_association_test.go new file mode 100644 index 000000000..cd1be015c --- /dev/null +++ b/builtin/providers/opc/import_ip_association_test.go @@ -0,0 +1,34 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCIPAssociation_importBasic(t *testing.T) { + resourceName := "opc_compute_ip_association.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccIPAssociationBasic, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckIPAssociationDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_ip_network_exchange_test.go b/builtin/providers/opc/import_ip_network_exchange_test.go new file mode 100644 index 000000000..c3abcb65e --- /dev/null +++ b/builtin/providers/opc/import_ip_network_exchange_test.go @@ -0,0 +1,34 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCIPNetworkExchange_importBasic(t *testing.T) { + resourceName := "opc_compute_ip_network_exchange.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccIPNetworkExchangeBasic, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIPNetworkExchangeDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_ip_reservation_test.go b/builtin/providers/opc/import_ip_reservation_test.go new file mode 100644 index 000000000..045bfcf16 --- /dev/null +++ b/builtin/providers/opc/import_ip_reservation_test.go @@ -0,0 +1,34 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCIPReservation_importBasic(t *testing.T) { + resourceName := "opc_compute_ip_reservation.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccIPReservationBasic, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIPReservationDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_route_test.go b/builtin/providers/opc/import_route_test.go new 
file mode 100644 index 000000000..8302c85ae --- /dev/null +++ b/builtin/providers/opc/import_route_test.go @@ -0,0 +1,33 @@ +package opc + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCRoute_importBasic(t *testing.T) { + resourceName := "opc_compute_route.test" + + ri := acctest.RandInt() + config := testAccOPCRouteConfig_Basic(ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckRouteDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_sec_rule_test.go b/builtin/providers/opc/import_sec_rule_test.go new file mode 100644 index 000000000..1db5b6b16 --- /dev/null +++ b/builtin/providers/opc/import_sec_rule_test.go @@ -0,0 +1,59 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCSecRule_importBasic(t *testing.T) { + resourceName := "opc_compute_sec_rule.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSecRuleBasic, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSecRuleDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccOPCSecRule_importComplete(t *testing.T) { + resourceName := "opc_compute_sec_rule.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSecRuleComplete, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSecRuleDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_security_application_test.go b/builtin/providers/opc/import_security_application_test.go new file mode 100644 index 000000000..35026c66a --- /dev/null +++ b/builtin/providers/opc/import_security_application_test.go @@ -0,0 +1,59 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCSecurityApplication_importICMP(t *testing.T) { + resourceName := "opc_compute_security_application.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSecurityApplicationICMP, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckSecurityApplicationDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccOPCSecurityApplication_importTCP(t *testing.T) { + resourceName := "opc_compute_security_application.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSecurityApplicationTCP, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckSecurityApplicationDestroy, + Steps: 
[]resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_security_association_test.go b/builtin/providers/opc/import_security_association_test.go new file mode 100644 index 000000000..35c98503f --- /dev/null +++ b/builtin/providers/opc/import_security_association_test.go @@ -0,0 +1,59 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCSecurityAssociation_importBasic(t *testing.T) { + resourceName := "opc_compute_security_association.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccSecurityAssociationBasic, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckSecurityAssociationDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccOPCSecurityAssociation_importComplete(t *testing.T) { + resourceName := "opc_compute_security_association.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccSecurityAssociationComplete, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckSecurityAssociationDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_security_ip_list_test.go b/builtin/providers/opc/import_security_ip_list_test.go new file mode 100644 index 000000000..b38c5a464 --- /dev/null +++ b/builtin/providers/opc/import_security_ip_list_test.go @@ -0,0 +1,34 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCSecurityIPList_importBasic(t *testing.T) { + resourceName := "opc_compute_security_ip_list.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSecurityIPListBasic, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSecurityIPListDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_security_list_test.go b/builtin/providers/opc/import_security_list_test.go new file mode 100644 index 000000000..0ac0d02d8 --- /dev/null +++ b/builtin/providers/opc/import_security_list_test.go @@ -0,0 +1,59 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCSecurityList_importBasic(t *testing.T) { + resourceName := "opc_compute_security_list.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSecurityListBasic, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSecurityListDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + 
+func TestAccOPCSecurityList_importComplete(t *testing.T) { + resourceName := "opc_compute_security_list.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSecurityListComplete, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSecurityListDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_security_protocol_test.go b/builtin/providers/opc/import_security_protocol_test.go new file mode 100644 index 000000000..da1df1176 --- /dev/null +++ b/builtin/providers/opc/import_security_protocol_test.go @@ -0,0 +1,58 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCSecurityProtocol_importBasic(t *testing.T) { + resourceName := "opc_compute_security_protocol.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSecurityProtocolBasic, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSecurityProtocolDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccOPCSecurityProtocol_importDisabled(t *testing.T) { + resourceName := "opc_compute_security_protocol.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSecurityProtocolFull, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSecurityProtocolDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_security_rule_test.go b/builtin/providers/opc/import_security_rule_test.go new file mode 100644 index 000000000..f3b98249e --- /dev/null +++ b/builtin/providers/opc/import_security_rule_test.go @@ -0,0 +1,58 @@ +package opc + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCSecurityRule_importBasic(t *testing.T) { + resourceName := "opc_compute_security_rule.test" + + ri := acctest.RandInt() + config := testAccOPCSecurityRuleConfig_Basic(ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSecurityRuleDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccOPCSecurityRule_importFull(t *testing.T) { + resourceName := "opc_compute_security_rule.test" + + ri := acctest.RandInt() + config := testAccOPCSecurityRuleConfig_Full(ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSecurityRuleDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/import_ssh_key_test.go b/builtin/providers/opc/import_ssh_key_test.go new file mode 100644 
index 000000000..a52987ed8 --- /dev/null +++ b/builtin/providers/opc/import_ssh_key_test.go @@ -0,0 +1,59 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCSSHKey_importBasic(t *testing.T) { + resourceName := "opc_compute_ssh_key.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSSHKeyBasic, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckSSHKeyDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccOPCSSHKey_importDisabled(t *testing.T) { + resourceName := "opc_compute_ssh_key.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSSHKeyDisabled, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckSSHKeyDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/provider.go b/builtin/providers/opc/provider.go new file mode 100644 index 000000000..46b4fcc95 --- /dev/null +++ b/builtin/providers/opc/provider.go @@ -0,0 +1,89 @@ +package opc + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "user": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_USERNAME", nil), + Description: "The user name for OPC API operations.", + }, + + "password": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_PASSWORD", nil), + Description: "The user password for OPC API operations.", + }, + + "identity_domain": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_IDENTITY_DOMAIN", nil), + Description: "The OPC identity domain for API operations", + }, + + "endpoint": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_ENDPOINT", nil), + Description: "The HTTP endpoint for OPC API operations.", + }, + + "max_retry_timeout": { + Type: schema.TypeInt, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OPC_MAX_RETRY_TIMEOUT", 3000), + Description: "Max num seconds to wait for successful response when operating on resources within OPC (defaults to 3000)", + }, + }, + + DataSourcesMap: map[string]*schema.Resource{ + "opc_compute_network_interface": dataSourceNetworkInterface(), + "opc_compute_vnic": dataSourceVNIC(), + }, + + ResourcesMap: map[string]*schema.Resource{ + "opc_compute_ip_network": resourceOPCIPNetwork(), + "opc_compute_acl": resourceOPCACL(), + "opc_compute_image_list": resourceOPCImageList(), + "opc_compute_instance": resourceInstance(), + "opc_compute_ip_address_reservation": resourceOPCIPAddressReservation(), + "opc_compute_ip_association": resourceOPCIPAssociation(), + "opc_compute_ip_network_exchange": resourceOPCIPNetworkExchange(), + "opc_compute_ip_reservation": resourceOPCIPReservation(), + "opc_compute_route": resourceOPCRoute(), + "opc_compute_security_application": resourceOPCSecurityApplication(), + "opc_compute_security_association": 
resourceOPCSecurityAssociation(), + "opc_compute_security_ip_list": resourceOPCSecurityIPList(), + "opc_compute_security_list": resourceOPCSecurityList(), + "opc_compute_security_rule": resourceOPCSecurityRule(), + "opc_compute_sec_rule": resourceOPCSecRule(), + "opc_compute_ssh_key": resourceOPCSSHKey(), + "opc_compute_storage_volume": resourceOPCStorageVolume(), + "opc_compute_vnic_set": resourceOPCVNICSet(), + "opc_compute_security_protocol": resourceOPCSecurityProtocol(), + "opc_compute_ip_address_prefix_set": resourceOPCIPAddressPrefixSet(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + User: d.Get("user").(string), + Password: d.Get("password").(string), + IdentityDomain: d.Get("identity_domain").(string), + Endpoint: d.Get("endpoint").(string), + MaxRetryTimeout: d.Get("max_retry_timeout").(int), + } + + return config.Client() +} diff --git a/builtin/providers/oracleopc/provider_test.go b/builtin/providers/opc/provider_test.go similarity index 91% rename from builtin/providers/oracleopc/provider_test.go rename to builtin/providers/opc/provider_test.go index c60076b06..8c2c842d8 100644 --- a/builtin/providers/oracleopc/provider_test.go +++ b/builtin/providers/opc/provider_test.go @@ -1,10 +1,11 @@ package opc import ( + "fmt" "os" "testing" - "fmt" + "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" @@ -40,7 +41,7 @@ func testAccPreCheck(t *testing.T) { } type OPCResourceState struct { - *OPCClient + *compute.Client *terraform.InstanceState } @@ -52,7 +53,7 @@ func opcResourceCheck(resourceName string, f func(checker *OPCResourceState) err } state := &OPCResourceState{ - OPCClient: testAccProvider.Meta().(*OPCClient), + Client: testAccProvider.Meta().(*compute.Client), InstanceState: rs.Primary, } diff --git a/builtin/providers/opc/resource_acl.go b/builtin/providers/opc/resource_acl.go new file mode 100644 index 000000000..55558b7fa --- /dev/null +++ b/builtin/providers/opc/resource_acl.go @@ -0,0 +1,151 @@ +package opc + +import ( + "fmt" + "log" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceOPCACL() *schema.Resource { + return &schema.Resource{ + Create: resourceOPCACLCreate, + Read: resourceOPCACLRead, + Update: resourceOPCACLUpdate, + Delete: resourceOPCACLDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, + }, + "uri": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceOPCACLCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + log.Print("[DEBUG] Creating acl") + + client := meta.(*compute.Client).ACLs() + input := compute.CreateACLInput{ + Name: d.Get("name").(string), + Enabled: d.Get("enabled").(bool), + } + + tags := getStringList(d, "tags") + if len(tags) != 0 { + input.Tags = tags + } + + if description, ok := d.GetOk("description"); ok { + 
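// GetOk reports ok only for non-zero values, so an empty description is simply omitted from the create request. +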
input.Description = description.(string) + } + + info, err := client.CreateACL(&input) + if err != nil { + return fmt.Errorf("Error creating ACL: %s", err) + } + + d.SetId(info.Name) + return resourceOPCACLRead(d, meta) +} + +func resourceOPCACLRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*compute.Client).ACLs() + + log.Printf("[DEBUG] Reading state of acl %s", d.Id()) + getInput := compute.GetACLInput{ + Name: d.Id(), + } + result, err := client.GetACL(&getInput) + if err != nil { + // ACL does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading acl %s: %s", d.Id(), err) + } + + log.Printf("[DEBUG] Read state of acl %s: %#v", d.Id(), result) + d.Set("name", result.Name) + d.Set("enabled", result.Enabled) + d.Set("description", result.Description) + d.Set("uri", result.URI) + if err := setStringList(d, "tags", result.Tags); err != nil { + return err + } + return nil +} + +func resourceOPCACLUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + + log.Print("[DEBUG] Updating acl") + + client := meta.(*compute.Client).ACLs() + input := compute.UpdateACLInput{ + Name: d.Get("name").(string), + Enabled: d.Get("enabled").(bool), + } + + tags := getStringList(d, "tags") + if len(tags) != 0 { + input.Tags = tags + } + + if description, ok := d.GetOk("description"); ok { + input.Description = description.(string) + } + + info, err := client.UpdateACL(&input) + if err != nil { + return fmt.Errorf("Error updating ACL: %s", err) + } + + d.SetId(info.Name) + return resourceOPCACLRead(d, meta) +} + +func resourceOPCACLDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Resource state: %#v", d.State()) + client := meta.(*compute.Client).ACLs() + name := d.Id() + + log.Printf("[DEBUG] Deleting ACL: %v", name) + + input := compute.DeleteACLInput{ + Name: name, + } + if err := client.DeleteACL(&input); err != nil { + return fmt.Errorf("Error deleting ACL %s: %s", name, err) + } + return nil +} diff --git a/builtin/providers/opc/resource_acl_test.go b/builtin/providers/opc/resource_acl_test.go new file mode 100644 index 000000000..9a0293e68 --- /dev/null +++ b/builtin/providers/opc/resource_acl_test.go @@ -0,0 +1,107 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccOPCACL_Basic(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccACLBasic, ri) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckACLDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testAccCheckACLExists, + ), + }, + }, + }) +} + +func TestAccOPCACL_Update(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccACLBasic, ri) + updatedConfig := fmt.Sprintf(testAccACLDisabled, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckACLDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: testAccCheckACLExists, + }, + { + Config: updatedConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckACLExists, +
resource.TestCheckResourceAttr("opc_compute_acl.test", "enabled", "false"), + ), + }, + }, + }) +} + +func testAccCheckACLExists(s *terraform.State) error { + client := testAccProvider.Meta().(*compute.Client).ACLs() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "opc_compute_acl" { + continue + } + + input := compute.GetACLInput{ + Name: rs.Primary.Attributes["name"], + } + if _, err := client.GetACL(&input); err != nil { + return fmt.Errorf("Error retrieving state of ACL %s: %s", input.Name, err) + } + } + + return nil +} + +func testAccCheckACLDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*compute.Client).ACLs() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "opc_compute_acl" { + continue + } + + input := compute.GetACLInput{ + Name: rs.Primary.Attributes["name"], + } + if info, err := client.GetACL(&input); err == nil { + return fmt.Errorf("ACL %s still exists: %#v", input.Name, info) + } + } + + return nil +} + +var testAccACLBasic = ` +resource "opc_compute_acl" "test" { + name = "test_acl-%d" + description = "test acl" +} +` + +var testAccACLDisabled = ` +resource "opc_compute_acl" "test" { + name = "test_acl-%d" + description = "test acl" + enabled = false +} +` diff --git a/builtin/providers/opc/resource_image_list.go b/builtin/providers/opc/resource_image_list.go new file mode 100644 index 000000000..79038f7bc --- /dev/null +++ b/builtin/providers/opc/resource_image_list.go @@ -0,0 +1,107 @@ +package opc + +import ( + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceOPCImageList() *schema.Resource { + return &schema.Resource{ + Create: resourceOPCImageListCreate, + Read: resourceOPCImageListRead, + Update: resourceOPCImageListUpdate, + Delete: resourceOPCImageListDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": { + Type: schema.TypeString, + Required: true, + }, + "default": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + }, + } +} + +func resourceOPCImageListCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).ImageList() + + name := d.Get("name").(string) + + createInput := &compute.CreateImageListInput{ + Name: name, + Description: d.Get("description").(string), + Default: d.Get("default").(int), + } + + createResult, err := client.CreateImageList(createInput) + if err != nil { + return err + } + + d.SetId(createResult.Name) + + return resourceOPCImageListRead(d, meta) +} + +func resourceOPCImageListUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).ImageList() + + name := d.Id() + + updateInput := &compute.UpdateImageListInput{ + Name: name, + Description: d.Get("description").(string), + Default: d.Get("default").(int), + } + + _, err := client.UpdateImageList(updateInput) + if err != nil { + return err + } + + return resourceOPCImageListRead(d, meta) +} + +func resourceOPCImageListRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).ImageList() + + getInput := &compute.GetImageListInput{ + Name: d.Id(), + } + getResult, err := client.GetImageList(getInput) + if err != nil { + return err + } + + d.Set("name", getResult.Name) + d.Set("description", getResult.Description) + d.Set("default", getResult.Default) + + return nil +} + +func 
resourceOPCImageListDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).ImageList() + + deleteInput := &compute.DeleteImageListInput{ + Name: d.Id(), + } + err := client.DeleteImageList(deleteInput) + if err != nil { + return err + } + + return nil +} diff --git a/builtin/providers/opc/resource_image_list_test.go b/builtin/providers/opc/resource_image_list_test.go new file mode 100644 index 000000000..072cf4225 --- /dev/null +++ b/builtin/providers/opc/resource_image_list_test.go @@ -0,0 +1,98 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccOPCImageList_Basic(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccImageList_basic, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckImageListDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: testAccCheckImageListExists, + }, + }, + }) +} + +func TestAccOPCImageList_Complete(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccImageList_complete, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckImageListDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: testAccCheckImageListExists, + }, + }, + }) +} + +func testAccCheckImageListExists(s *terraform.State) error { + client := testAccProvider.Meta().(*compute.Client).ImageList() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "opc_compute_image_list" { + continue + } + + input := compute.GetImageListInput{ + Name: rs.Primary.Attributes["name"], + } + if _, err := client.GetImageList(&input); err != nil { + return fmt.Errorf("Error retrieving state of Image List %s: %s", input.Name, err) + } + } + + return nil +} + +func testAccCheckImageListDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*compute.Client).ImageList() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "opc_compute_image_list" { + continue + } + + input := compute.GetImageListInput{ + Name: rs.Primary.Attributes["name"], + } + if info, err := client.GetImageList(&input); err == nil { + return fmt.Errorf("Image List %s still exists: %#v", input.Name, info) + } + } + + return nil +} + +var testAccImageList_basic = ` +resource "opc_compute_image_list" "test" { + name = "test-acc-image-list-basic-%d" + description = "Image List (Basic)" +} +` + +var testAccImageList_complete = ` +resource "opc_compute_image_list" "test" { + name = "test-acc-image-list-complete-%d" + description = "Image List (Complete)" + default = 2 +} +` diff --git a/builtin/providers/opc/resource_instance.go b/builtin/providers/opc/resource_instance.go new file mode 100644 index 000000000..8b682cf96 --- /dev/null +++ b/builtin/providers/opc/resource_instance.go @@ -0,0 +1,884 @@ +package opc + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "strings" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceInstanceCreate, + Read: resourceInstanceRead, + Delete: resourceInstanceDelete, + Importer: 
&schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + combined := strings.Split(d.Id(), "/") + if len(combined) != 2 { + return nil, fmt.Errorf("Invalid ID specified. Must be in the form of instance_name/instance_id. Got: %s", d.Id()) + } + d.Set("name", combined[0]) + d.SetId(combined[1]) + return []*schema.ResourceData{d}, nil + }, + }, + + Schema: map[string]*schema.Schema{ + ///////////////////////// + // Required Attributes // + ///////////////////////// + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "shape": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + ///////////////////////// + // Optional Attributes // + ///////////////////////// + "instance_attributes": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return true + }, + }, + + "boot_order": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "hostname": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "image_list": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "label": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "networking_info": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dns": { + // Required for Shared Network Interface, will default if unspecified, however + // Optional for IP Network Interface + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "index": { + Type: schema.TypeInt, + ForceNew: true, + Required: true, + }, + + "ip_address": { + // Optional, IP Network only + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "ip_network": { + // Required for an IP Network Interface + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "mac_address": { + // Optional, IP Network Only + Type: schema.TypeString, + ForceNew: true, + Computed: true, + Optional: true, + }, + + "model": { + // Required, Shared Network only. + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "e1000" { + errors = append(errors, fmt.Errorf("Model needs to be set to 'e1000', got: %s", value)) + } + return + }, + }, + + "name_servers": { + // Optional, IP Network + Shared Network + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "nat": { + // Optional for IP Network + // Required for Shared Network + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "search_domains": { + // Optional, IP Network + Shared Network + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "sec_lists": { + // Required, Shared Network only. Will default if unspecified however + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "shared_network": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "vnic": { + // Optional, IP Network only. 
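+ // Name of the vNIC for this interface; surfaced back through the opc_compute_network_interface data source.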
+ Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "vnic_sets": { + // Optional, IP Network only. + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["index"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["vnic"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["nat"])) + buf.WriteString(fmt.Sprintf("%s-", m["model"].(string))) + return hashcode.String(buf.String()) + }, + }, + + "reverse_dns": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "ssh_keys": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "storage": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "index": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "volume": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "tags": tagsForceNewSchema(), + + ///////////////////////// + // Computed Attributes // + ///////////////////////// + "attributes": { + Type: schema.TypeString, + Computed: true, + }, + + "availability_domain": { + Type: schema.TypeString, + Computed: true, + }, + + "domain": { + Type: schema.TypeString, + Computed: true, + }, + + "entry": { + Type: schema.TypeInt, + Computed: true, + }, + + "fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + + "image_format": { + Type: schema.TypeString, + Computed: true, + }, + + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + + "placement_requirements": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "platform": { + Type: schema.TypeString, + Computed: true, + }, + + "priority": { + Type: schema.TypeString, + Computed: true, + }, + + "quota_reservation": { + Type: schema.TypeString, + Computed: true, + }, + + "relationships": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "resolvers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "site": { + Type: schema.TypeString, + Computed: true, + }, + + "start_time": { + Type: schema.TypeString, + Computed: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + + "vcable_id": { + Type: schema.TypeString, + Computed: true, + }, + + "virtio": { + Type: schema.TypeBool, + Computed: true, + }, + + "vnc_address": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceInstanceCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).Instances() + + // Get Required Attributes + input := &compute.CreateInstanceInput{ + Name: d.Get("name").(string), + Shape: d.Get("shape").(string), + } + + // Get optional instance attributes + if attributes, err := getInstanceAttributes(d); err == nil && attributes != nil { + input.Attributes = attributes + } + + if bootOrder := getIntList(d, "boot_order"); len(bootOrder) > 0 { + input.BootOrder = bootOrder + } + + if v, ok := d.GetOk("hostname"); ok { + input.Hostname = v.(string) + } + + if v, ok := d.GetOk("image_list"); ok { + input.ImageList = v.(string) + } + + if v, ok := d.GetOk("label"); ok { +
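// Optional free-form label for the instance; only sent when set in config. +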
input.Label = v.(string) + } + + interfaces, err := readNetworkInterfacesFromConfig(d) + if err != nil { + return err + } + if interfaces != nil { + input.Networking = interfaces + } + + if v, ok := d.GetOk("reverse_dns"); ok { + input.ReverseDNS = v.(bool) + } + + if sshKeys := getStringList(d, "ssh_keys"); len(sshKeys) > 0 { + input.SSHKeys = sshKeys + } + + // TODO Add storage things + //storage := getStorageAttachments(d) + + if tags := getStringList(d, "tags"); len(tags) > 0 { + input.Tags = tags + } + + result, err := client.CreateInstance(input) + if err != nil { + return fmt.Errorf("Error creating instance %s: %s", input.Name, err) + } + + log.Printf("[DEBUG] Created instance %s", result.ID) + + d.SetId(result.ID) + + return resourceInstanceRead(d, meta) +} + +func resourceInstanceRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).Instances() + + name := d.Get("name").(string) + + input := &compute.GetInstanceInput{ + ID: d.Id(), + Name: name, + } + + log.Printf("[DEBUG] Reading state of instance %s", name) + result, err := client.GetInstance(input) + if err != nil { + // Instance doesn't exist + if compute.WasNotFoundError(err) { + log.Printf("[DEBUG] Instance %s not found", name) + d.SetId("") + return nil + } + return fmt.Errorf("Error reading instance %s: %s", name, err) + } + + log.Printf("[DEBUG] Instance '%s' found", name) + + // Update attributes + return updateInstanceAttributes(d, result) +} + +func updateInstanceAttributes(d *schema.ResourceData, instance *compute.InstanceInfo) error { + d.Set("name", instance.Name) + d.Set("shape", instance.Shape) + + if err := setInstanceAttributes(d, instance.Attributes); err != nil { + return err + } + + if attrs, ok := d.GetOk("instance_attributes"); ok && attrs != nil { + d.Set("instance_attributes", attrs.(string)) + } + + if err := setIntList(d, "boot_order", instance.BootOrder); err != nil { + return err + } + d.Set("hostname", instance.Hostname) + d.Set("image_list", instance.ImageList) + d.Set("label", instance.Label) + + if err := readNetworkInterfaces(d, instance.Networking); err != nil { + return err + } + + d.Set("reverse_dns", instance.ReverseDNS) + if err := setStringList(d, "ssh_keys", instance.SSHKeys); err != nil { + return err + } + + // TODO Set Storage + + if err := setStringList(d, "tags", instance.Tags); err != nil { + return err + } + d.Set("availability_domain", instance.AvailabilityDomain) + d.Set("domain", instance.Domain) + d.Set("entry", instance.Entry) + d.Set("fingerprint", instance.Fingerprint) + d.Set("image_format", instance.ImageFormat) + d.Set("ip_address", instance.IPAddress) + + if err := setStringList(d, "placement_requirements", instance.PlacementRequirements); err != nil { + return err + } + + d.Set("platform", instance.Platform) + d.Set("priority", instance.Priority) + d.Set("quota_reservation", instance.QuotaReservation) + + if err := setStringList(d, "relationships", instance.Relationships); err != nil { + return err + } + + if err := setStringList(d, "resolvers", instance.Resolvers); err != nil { + return err + } + + d.Set("site", instance.Site) + d.Set("start_time", instance.StartTime) + d.Set("state", instance.State) + + if err := setStringList(d, "tags", instance.Tags); err != nil { + return err + } + + d.Set("vcable_id", instance.VCableID) + d.Set("virtio", instance.Virtio) + d.Set("vnc_address", instance.VNC) + + return nil +} + +func resourceInstanceDelete(d *schema.ResourceData, meta interface{}) error { + client := 
meta.(*compute.Client).Instances() + + name := d.Get("name").(string) + + input := &compute.DeleteInstanceInput{ + ID: d.Id(), + Name: name, + } + log.Printf("[DEBUG] Deleting instance %s", name) + + if err := client.DeleteInstance(input); err != nil { + return fmt.Errorf("Error deleting instance %s: %s", name, err) + } + + return nil +} + +// TODO Uncomment this when working on storage +/* +func getStorageAttachments(d *schema.ResourceData) []compute.StorageAttachment { + storageAttachments := []compute.StorageAttachment{} + storage := d.Get("storage").(*schema.Set) + for _, i := range storage.List() { + attrs := i.(map[string]interface{}) + storageAttachments = append(storageAttachments, compute.StorageAttachmentInput{ + Index: attrs["index"].(int), + Volume: attrs["volume"].(string), + }) + } + return storageAttachments +}*/ + +// Parses instance_attributes from a string to a map[string]interface{} and returns any errors. +func getInstanceAttributes(d *schema.ResourceData) (map[string]interface{}, error) { + var attrs map[string]interface{} + + // Empty instance attributes + attributes, ok := d.GetOk("instance_attributes") + if !ok { + return attrs, nil + } + + if err := json.Unmarshal([]byte(attributes.(string)), &attrs); err != nil { + return attrs, fmt.Errorf("Cannot parse attributes as json: %s", err) + } + + return attrs, nil +} + +// Reads attributes from the returned instance object, and sets the computed attributes string +// as JSON +func setInstanceAttributes(d *schema.ResourceData, attributes map[string]interface{}) error { + // Shouldn't ever get nil attributes on an instance, but protect against the case either way + if attributes == nil { + return nil + } + + b, err := json.Marshal(attributes) + if err != nil { + return fmt.Errorf("Error marshalling returned attributes: %s", err) + } + return d.Set("attributes", string(b)) +} + +// Populates and validates shared network and ip network interfaces to return the map of +// objects needed to create/update an instance's networking_info +func readNetworkInterfacesFromConfig(d *schema.ResourceData) (map[string]compute.NetworkingInfo, error) { + interfaces := make(map[string]compute.NetworkingInfo) + + if v, ok := d.GetOk("networking_info"); ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + ni := v.(map[string]interface{}) + index, ok := ni["index"].(int) + if !ok { + return nil, fmt.Errorf("Index not specified for network interface: %v", ni) + } + + deviceIndex := fmt.Sprintf("eth%d", index) + + // Verify that the network interface doesn't already exist + if _, ok := interfaces[deviceIndex]; ok { + return nil, fmt.Errorf("Duplicate Network interface at eth%d already specified", index) + } + + // Determine if we're creating a shared network interface or an IP Network interface + info := compute.NetworkingInfo{} + var err error + if ni["shared_network"].(bool) { + // Populate shared network parameters + info, err = readSharedNetworkFromConfig(ni) + } else { + // Populate IP Network Parameters + info, err = readIPNetworkFromConfig(ni) + } + if err != nil { + return nil, err + } + // And you may find yourself in a beautiful house, with a beautiful wife + // And you may ask yourself, well, how did I get here?
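+ // Key each interface by its OS-level device name ("eth0", "eth1", ...) derived from the configured index.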
+ interfaces[deviceIndex] = info + } + } + + return interfaces, nil +} + +// Reads a networking_info config block as a shared network interface +func readSharedNetworkFromConfig(ni map[string]interface{}) (compute.NetworkingInfo, error) { + info := compute.NetworkingInfo{} + // Validate the shared network + if err := validateSharedNetwork(ni); err != nil { + return info, err + } + // Populate shared network fields; checking type casting + dns := []string{} + if v, ok := ni["dns"]; ok && v != nil { + for _, d := range v.([]interface{}) { + dns = append(dns, d.(string)) + } + if len(dns) > 0 { + info.DNS = dns + } + } + + if v, ok := ni["model"].(string); ok && v != "" { + info.Model = compute.NICModel(v) + } + + nats := []string{} + if v, ok := ni["nat"]; ok && v != nil { + for _, nat := range v.([]interface{}) { + nats = append(nats, nat.(string)) + } + if len(nats) > 0 { + info.Nat = nats + } + } + + slists := []string{} + if v, ok := ni["sec_lists"]; ok && v != nil { + for _, slist := range v.([]interface{}) { + slists = append(slists, slist.(string)) + } + if len(slists) > 0 { + info.SecLists = slists + } + } + + nservers := []string{} + if v, ok := ni["name_servers"]; ok && v != nil { + for _, nserver := range v.([]interface{}) { + nservers = append(nservers, nserver.(string)) + } + if len(nservers) > 0 { + info.NameServers = nservers + } + } + + sdomains := []string{} + if v, ok := ni["search_domains"]; ok && v != nil { + for _, sdomain := range v.([]interface{}) { + sdomains = append(sdomains, sdomain.(string)) + } + if len(sdomains) > 0 { + info.SearchDomains = sdomains + } + } + + return info, nil +} + +// Unfortunately this cannot take place during plan-phase, because we currently cannot have a validation +// function based off of multiple fields in the supplied schema. 
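+// For reference, a minimal networking_info block that passes this validation might
+// look like the following (illustrative sketch only; the NAT pool path is an assumption):
+//
+//	networking_info {
+//	  index          = 0
+//	  shared_network = true
+//	  model          = "e1000"
+//	  nat            = ["ippool:/oracle/public/ippool"]
+//	}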
+func validateSharedNetwork(ni map[string]interface{}) error { + // A Shared Networking Interface MUST have the following attributes set: + // - "model" + // - "nat" + // The following attributes _cannot_ be set for a shared network: + // - "ip_address" + // - "ip_network" + // - "mac_address" + // - "vnic" + // - "vnic_sets" + if d, ok := ni["model"]; !ok || d.(string) == "" { + return fmt.Errorf("'model' field needs to be set for a Shared Networking Interface") + } + + if _, ok := ni["nat"]; !ok { + return fmt.Errorf("'nat' field needs to be set for a Shared Networking Interface") + } + + // Strings only + nilAttrs := []string{ + "ip_address", + "ip_network", + "mac_address", + "vnic", + } + + for _, v := range nilAttrs { + if d, ok := ni[v]; ok && d.(string) != "" { + return fmt.Errorf("%q field cannot be set in a Shared Networking Interface", v) + } + } + if v, ok := ni["vnic_sets"].([]interface{}); ok && len(v) > 0 { // TypeList values arrive as []interface{}, not []string + return fmt.Errorf("%q field cannot be set in a Shared Networking Interface", "vnic_sets") + } + + return nil +} + +// Populates fields for an IP Network +func readIPNetworkFromConfig(ni map[string]interface{}) (compute.NetworkingInfo, error) { + info := compute.NetworkingInfo{} + // Validate the IP Network + if err := validateIPNetwork(ni); err != nil { + return info, err + } + // Populate fields + if v, ok := ni["ip_network"].(string); ok && v != "" { + info.IPNetwork = v + } + + dns := []string{} + if v, ok := ni["dns"]; ok && v != nil { + for _, d := range v.([]interface{}) { + dns = append(dns, d.(string)) + } + if len(dns) > 0 { + info.DNS = dns + } + } + + if v, ok := ni["ip_address"].(string); ok && v != "" { + info.IPAddress = v + } + + if v, ok := ni["mac_address"].(string); ok && v != "" { + info.MACAddress = v + } + + nservers := []string{} + if v, ok := ni["name_servers"]; ok && v != nil { + for _, nserver := range v.([]interface{}) { + nservers = append(nservers, nserver.(string)) + } + if len(nservers) > 0 { + info.NameServers = nservers + } + } + + nats := []string{} + if v, ok := ni["nat"]; ok && v != nil { + for _, nat := range v.([]interface{}) { + nats = append(nats, nat.(string)) + } + if len(nats) > 0 { + info.Nat = nats + } + } + + sdomains := []string{} + if v, ok := ni["search_domains"]; ok && v != nil { + for _, sdomain := range v.([]interface{}) { + sdomains = append(sdomains, sdomain.(string)) + } + if len(sdomains) > 0 { + info.SearchDomains = sdomains + } + } + + if v, ok := ni["vnic"].(string); ok && v != "" { + info.Vnic = v + } + + vnicSets := []string{} + if v, ok := ni["vnic_sets"]; ok && v != nil { + for _, vnic := range v.([]interface{}) { + vnicSets = append(vnicSets, vnic.(string)) + } + if len(vnicSets) > 0 { + info.VnicSets = vnicSets + } + } + + return info, nil +} + +// Validates an IP Network config block +func validateIPNetwork(ni map[string]interface{}) error { + // An IP Networking Interface MUST have the following attributes set: + // - "ip_network" + // The following attributes _cannot_ be set for an IP Network: + // - "model" + + // Required to be set + if d, ok := ni["ip_network"]; !ok || d.(string) == "" { + return fmt.Errorf("'ip_network' field is required for an IP Network interface") + } + + // Required to be unset + if d, ok := ni["model"]; ok && d.(string) != "" { + return fmt.Errorf("'model' cannot be set in an IP Network Interface") + } + + return nil +} + +// Flattens the instance's network interfaces back into state +func readNetworkInterfaces(d *schema.ResourceData, ifaces map[string]compute.NetworkingInfo) error { + result := 
make([]map[string]interface{}, 0) + + // Nil check for import case + if ifaces == nil { + return d.Set("networking_info", result) + } + + for _, iface := range ifaces { + res := make(map[string]interface{}) + if iface.DNS != nil { + res["dns"] = iface.DNS + } + if iface.IPAddress != "" { + res["ip_address"] = iface.IPAddress + } + if iface.IPNetwork != "" { + res["ip_network"] = iface.IPNetwork + } + if iface.MACAddress != "" { + res["mac_address"] = iface.MACAddress + } + if iface.Model != "" { + res["model"] = iface.Model + // Model can only be set on Shared networks + res["shared_network"] = true + } + if iface.NameServers != nil { + res["name_servers"] = iface.NameServers + } + if iface.Nat != nil { + res["nat"] = iface.Nat + } + if iface.SearchDomains != nil { + res["search_domains"] = iface.SearchDomains + } + if iface.SecLists != nil { + res["sec_lists"] = iface.SecLists + } + if iface.Vnic != "" { + res["vnic"] = iface.Vnic + // VNIC can only be set on an IP Network + res["shared_network"] = false + } + if iface.VnicSets != nil { + res["vnic_sets"] = iface.VnicSets + } + + result = append(result, res) + } + + return d.Set("networking_info", result) +} diff --git a/builtin/providers/opc/resource_instance_test.go b/builtin/providers/opc/resource_instance_test.go new file mode 100644 index 000000000..00bb66e7e --- /dev/null +++ b/builtin/providers/opc/resource_instance_test.go @@ -0,0 +1,229 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccOPCInstance_basic(t *testing.T) { + resName := "opc_compute_instance.test" + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccInstanceBasic(rInt), + Check: resource.ComposeTestCheckFunc( + testAccOPCCheckInstanceExists, + resource.TestCheckResourceAttr(resName, "name", fmt.Sprintf("acc-test-instance-%d", rInt)), + resource.TestCheckResourceAttr(resName, "label", "TestAccOPCInstance_basic"), + ), + }, + }, + }) +} + +func TestAccOPCInstance_sharedNetworking(t *testing.T) { + rInt := acctest.RandInt() + resName := "opc_compute_instance.test" + dataName := "data.opc_compute_network_interface.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccInstanceSharedNetworking(rInt), + Check: resource.ComposeTestCheckFunc( + testAccOPCCheckInstanceExists, + resource.TestCheckResourceAttrSet(resName, "id"), + resource.TestCheckResourceAttrSet(resName, "availability_domain"), + resource.TestCheckResourceAttrSet(resName, "domain"), + resource.TestCheckResourceAttrSet(resName, "hostname"), + resource.TestCheckResourceAttrSet(resName, "ip_address"), + resource.TestCheckResourceAttr(resName, "name", fmt.Sprintf("acc-test-instance-%d", rInt)), + resource.TestCheckResourceAttr(resName, "networking_info.#", "1"), + // Default Placement Reqs + resource.TestCheckResourceAttr(resName, "placement_requirements.#", "2"), + resource.TestCheckResourceAttr(resName, "placement_requirements.0", "/system/compute/allow_instances"), + resource.TestCheckResourceAttr(resName, "placement_requirements.1", 
"/system/compute/placement/default"), + resource.TestCheckResourceAttr(resName, "platform", "linux"), + resource.TestCheckResourceAttr(resName, "priority", "/oracle/public/default"), + resource.TestCheckResourceAttr(resName, "reverse_dns", "true"), + resource.TestCheckResourceAttr(resName, "state", "running"), + resource.TestCheckResourceAttr(resName, "tags.#", "2"), + resource.TestCheckResourceAttrSet(resName, "vcable_id"), + resource.TestCheckResourceAttr(resName, "virtio", "false"), + + // Check Data Source to validate networking attributes + resource.TestCheckResourceAttr(dataName, "shared_network", "true"), + resource.TestCheckResourceAttr(dataName, "nat.#", "1"), + resource.TestCheckResourceAttr(dataName, "model", "e1000"), + resource.TestCheckResourceAttr(dataName, "sec_lists.#", "1"), + resource.TestCheckResourceAttr(dataName, "name_servers.#", "0"), + resource.TestCheckResourceAttr(dataName, "vnic_sets.#", "0"), + ), + }, + }, + }) +} + +func TestAccOPCInstance_ipNetwork(t *testing.T) { + rInt := acctest.RandInt() + resName := "opc_compute_instance.test" + dataName := "data.opc_compute_network_interface.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccInstanceIPNetworking(rInt), + Check: resource.ComposeTestCheckFunc( + testAccOPCCheckInstanceExists, + resource.TestCheckResourceAttrSet(resName, "id"), + resource.TestCheckResourceAttrSet(resName, "availability_domain"), + resource.TestCheckResourceAttrSet(resName, "domain"), + resource.TestCheckResourceAttrSet(resName, "ip_address"), + resource.TestCheckResourceAttr(resName, "name", fmt.Sprintf("acc-test-instance-%d", rInt)), + resource.TestCheckResourceAttr(resName, "networking_info.#", "1"), + // Default Placement Reqs + resource.TestCheckResourceAttr(resName, "placement_requirements.#", "2"), + resource.TestCheckResourceAttr(resName, "placement_requirements.0", "/system/compute/allow_instances"), + resource.TestCheckResourceAttr(resName, "placement_requirements.1", "/system/compute/placement/default"), + resource.TestCheckResourceAttr(resName, "platform", "linux"), + resource.TestCheckResourceAttr(resName, "priority", "/oracle/public/default"), + resource.TestCheckResourceAttr(resName, "reverse_dns", "true"), + resource.TestCheckResourceAttr(resName, "state", "running"), + resource.TestCheckResourceAttr(resName, "virtio", "false"), + + // Check Data Source to validate networking attributes + resource.TestCheckResourceAttr(dataName, "ip_network", fmt.Sprintf("testing-ip-network-%d", rInt)), + resource.TestCheckResourceAttr(dataName, "vnic", fmt.Sprintf("ip-network-test-%d", rInt)), + resource.TestCheckResourceAttr(dataName, "shared_network", "false"), + ), + }, + }, + }) +} + +func testAccOPCCheckInstanceExists(s *terraform.State) error { + client := testAccProvider.Meta().(*compute.Client).Instances() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "opc_compute_instance" { + continue + } + + input := &compute.GetInstanceInput{ + ID: rs.Primary.ID, + Name: rs.Primary.Attributes["name"], + } + _, err := client.GetInstance(input) + if err != nil { + return fmt.Errorf("Error retrieving state of Instance %s: %s", input.Name, err) + } + } + + return nil +} + +func testAccOPCCheckInstanceDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*compute.Client).Instances() + + for _, rs := range s.RootModule().Resources { + if rs.Type != 
"opc_compute_instance" { + continue + } + + input := &compute.GetInstanceInput{ + ID: rs.Primary.ID, + Name: rs.Primary.Attributes["name"], + } + if info, err := client.GetInstance(input); err == nil { + return fmt.Errorf("Instance %s still exists: %#v", input.Name, info) + } + } + + return nil +} + +const validSSHKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIkZEr5UcMojtxhk6Zum39NOihHNXEvRWDt5WssX8TH/ghpv3D25K1pJkf+wfAi17HwEmYwPMEyEHENS443v6RZbXvzCkUWzkJzq7Zvbdqld038km31La2QUoMMp1KL5zk1nM65xCeQDVcR/h++03EScB2CuzTpAV6khMdfgOJgxm361kfrDVRwc1HQrAOpOnzkpPfwqBrYWqN1UnKvuO77Wk8z5LBe03EPNru3bLE3s3qHI9hjO0gXMiVUi0KyNxdWfDO8esqQlKavHAeePyrRA55YF8kBB5dEl4tVNOqpY/8TRnGN1mOe0LWxa8Ytz1wbyS49knsNVTel" + +func testAccInstanceBasic(rInt int) string { + return fmt.Sprintf(` +resource "opc_compute_instance" "test" { + name = "acc-test-instance-%d" + label = "TestAccOPCInstance_basic" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + instance_attributes = < 2 { + errors = append(errors, fmt.Errorf( + "%q can only be an interger between 0-2. Got: %d", k, value)) + } + return +} + +// Admin distance can either be a 0, 1, or a 2. Defaults to 0. +func validateIPProtocol(v interface{}, k string) (ws []string, errors []error) { + validProtocols := map[string]struct{}{ + string(compute.All): {}, + string(compute.AH): {}, + string(compute.ESP): {}, + string(compute.ICMP): {}, + string(compute.ICMPV6): {}, + string(compute.IGMP): {}, + string(compute.IPIP): {}, + string(compute.GRE): {}, + string(compute.MPLSIP): {}, + string(compute.OSPF): {}, + string(compute.PIM): {}, + string(compute.RDP): {}, + string(compute.SCTP): {}, + string(compute.TCP): {}, + string(compute.UDP): {}, + } + + value := v.(string) + if _, ok := validProtocols[value]; !ok { + errors = append(errors, fmt.Errorf( + `%q must contain a valid Image owner , expected ["all", "ah", "esp", "icmp", "icmpv6", "igmp", "ipip", "gre", "mplsip", "ospf", "pim", "rdp", "sctp", "tcp", "udp"] got %q`, + k, value)) + } + return +} diff --git a/builtin/providers/opc/validators_test.go b/builtin/providers/opc/validators_test.go new file mode 100644 index 000000000..96df22312 --- /dev/null +++ b/builtin/providers/opc/validators_test.go @@ -0,0 +1,102 @@ +package opc + +import "testing" + +func TestValidateIPPrefixCIDR(t *testing.T) { + validPrefixes := []string{ + "10.0.1.0/24", + "10.1.0.0/16", + "192.168.0.1/32", + "10.20.0.0/18", + "10.0.12.0/24", + } + + for _, v := range validPrefixes { + _, errors := validateIPPrefixCIDR(v, "prefix") + if len(errors) != 0 { + t.Fatalf("%q should be a valid IP Address Prefix: %q", v, errors) + } + } + + invalidPrefixes := []string{ + "10.0.0.1/35", + "192.168.1.256/16", + "256.0.1/16", + } + + for _, v := range invalidPrefixes { + _, errors := validateIPPrefixCIDR(v, "prefix") + if len(errors) == 0 { + t.Fatalf("%q should not be a valid IP Address", v) + } + } +} + +func TestValidateAdminDistance(t *testing.T) { + validDistances := []int{ + 0, + 1, + 2, + } + + for _, v := range validDistances { + _, errors := validateAdminDistance(v, "distance") + if len(errors) != 0 { + t.Fatalf("%q should be a valid Admin Distance: %q", v, errors) + } + } + + invalidDistances := []int{ + -1, + 4, + 3, + 42, + } + + for _, v := range invalidDistances { + _, errors := validateAdminDistance(v, "distance") + if len(errors) == 0 { + t.Fatalf("%q should not be a valid Admin Distance", v) + } + } +} + +func TestValidateIPProtocol(t *testing.T) { + validProtocols := []string{ + "all", + "ah", + "esp", + 
"icmp", + "icmpv6", + "igmp", + "ipip", + "gre", + "mplsip", + "ospf", + "pim", + "rdp", + "sctp", + "tcp", + "udp", + } + + for _, v := range validProtocols { + _, errors := validateIPProtocol(v, "ip_protocol") + if len(errors) != 0 { + t.Fatalf("%q should be a valid Admin Distance: %q", v, errors) + } + } + + invalidProtocols := []string{ + "bad", + "real bad", + "are you even trying at this point?", + } + for _, v := range invalidProtocols { + _, errors := validateIPProtocol(v, "ip_protocol") + if len(errors) == 0 { + t.Fatalf("%q should not be a valid IP Protocol", v) + } + } + +} diff --git a/builtin/providers/oracleopc/config.go b/builtin/providers/oracleopc/config.go deleted file mode 100644 index fbae3b5d5..000000000 --- a/builtin/providers/oracleopc/config.go +++ /dev/null @@ -1,47 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "net/url" -) - -type Config struct { - User string - Password string - IdentityDomain string - Endpoint string - MaxRetryTimeout int -} - -type storageAttachment struct { - index int - instanceName *compute.InstanceName -} - -type OPCClient struct { - *compute.AuthenticatedClient - MaxRetryTimeout int - storageAttachmentsByVolumeCache map[string][]storageAttachment -} - -func (c *Config) Client() (*OPCClient, error) { - u, err := url.ParseRequestURI(c.Endpoint) - if err != nil { - return nil, fmt.Errorf("Invalid endpoint URI: %s", err) - } - - client := compute.NewComputeClient(c.IdentityDomain, c.User, c.Password, u) - authenticatedClient, err := client.Authenticate() - if err != nil { - return nil, fmt.Errorf("Authentication failed: %s", err) - } - - opcClient := &OPCClient{ - AuthenticatedClient: authenticatedClient, - MaxRetryTimeout: c.MaxRetryTimeout, - storageAttachmentsByVolumeCache: make(map[string][]storageAttachment), - } - - return opcClient, nil -} diff --git a/builtin/providers/oracleopc/provider.go b/builtin/providers/oracleopc/provider.go deleted file mode 100644 index a6d0d3fb5..000000000 --- a/builtin/providers/oracleopc/provider.go +++ /dev/null @@ -1,75 +0,0 @@ -package opc - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "user": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OPC_USERNAME", nil), - Description: "The user name for OPC API operations.", - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OPC_PASSWORD", nil), - Description: "The user password for OPC API operations.", - }, - - "identityDomain": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OPC_IDENTITY_DOMAIN", nil), - Description: "The OPC identity domain for API operations", - }, - - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OPC_ENDPOINT", nil), - Description: "The HTTP endpoint for OPC API operations.", - }, - - "maxRetryTimeout": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OPC_MAX_RETRY_TIMEOUT", 3000), - Description: "Max num seconds to wait for successful response when operating on resources within OPC (defaults to 3000)", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "opc_compute_storage_volume": resourceStorageVolume(), - "opc_compute_instance": resourceInstance(), - "opc_compute_ssh_key": resourceSSHKey(), - "opc_compute_security_application": resourceSecurityApplication(), - "opc_compute_security_list": resourceSecurityList(), - "opc_compute_security_ip_list": resourceSecurityIPList(), - "opc_compute_ip_reservation": resourceIPReservation(), - "opc_compute_ip_association": resourceIPAssociation(), - "opc_compute_security_rule": resourceSecurityRule(), - "opc_compute_security_association": resourceSecurityAssociation(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - User: d.Get("user").(string), - Password: d.Get("password").(string), - IdentityDomain: d.Get("identityDomain").(string), - Endpoint: d.Get("endpoint").(string), - MaxRetryTimeout: d.Get("maxRetryTimeout").(int), - } - - return config.Client() -} diff --git a/builtin/providers/oracleopc/resource_instance.go b/builtin/providers/oracleopc/resource_instance.go deleted file mode 100644 index 70f3b99c8..000000000 --- a/builtin/providers/oracleopc/resource_instance.go +++ /dev/null @@ -1,306 +0,0 @@ -package opc - -import ( - "encoding/json" - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceInstanceCreate, - Read: resourceInstanceRead, - Delete: resourceInstanceDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "shape": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "imageList": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "label": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "ip": { - Type: schema.TypeString, - Optional: false, - Computed: true, - }, - - "opcId": { - Type: schema.TypeString, - Optional: false, - Computed: true, - }, - - "sshKeys": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "attributes": { - Type: schema.TypeString, - Optional: true, - ForceNew: 
true, - }, - - "vcable": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "storage": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "index": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "volume": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - - "bootOrder": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeInt}, - }, - }, - } -} - -func getAttrs(d *schema.ResourceData) (*map[string]interface{}, error) { - var attrs map[string]interface{} - - attrString := d.Get("attributes").(string) - if attrString == "" { - return &attrs, nil - } - if err := json.Unmarshal([]byte(attrString), &attrs); err != nil { - return &attrs, fmt.Errorf("Cannot parse '%s' as json", attrString) - } - return &attrs, nil -} - -func resourceInstanceCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d.State()) - - client := meta.(*OPCClient).Instances() - name := d.Get("name").(string) - shape := d.Get("shape").(string) - imageList := d.Get("imageList").(string) - label := d.Get("label").(string) - storage := getStorageAttachments(d) - sshKeys := getSSHKeys(d) - bootOrder := getBootOrder(d) - - attrs, err := getAttrs(d) - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating instance with name %s, shape %s, imageList %s, storage %s, bootOrder %s, label %s, sshKeys %s, attrs %#v", - name, shape, imageList, storage, bootOrder, label, sshKeys, attrs) - - id, err := client.LaunchInstance(name, label, shape, imageList, storage, bootOrder, sshKeys, *attrs) - if err != nil { - return fmt.Errorf("Error creating instance %s: %s", name, err) - } - - log.Printf("[DEBUG] Waiting for instance %s to come online", id.String()) - info, err := client.WaitForInstanceRunning(id, meta.(*OPCClient).MaxRetryTimeout) - if err != nil { - return fmt.Errorf("Error waiting for instance %s to come online: %s", id, err) - } - - log.Printf("[DEBUG] Created instance %s: %#v", id, info) - - attachStorage( - &compute.InstanceName{ - Name: info.Name, - ID: info.ID, - }, - d, meta) - - d.SetId(info.Name) - updateInstanceResourceData(d, info) - return nil -} - -func attachStorage(name *compute.InstanceName, d *schema.ResourceData, meta interface{}) error { - storageClient := meta.(*OPCClient).StorageAttachments() - storage := d.Get("storage").(*schema.Set) - updatedStorage := schema.NewSet(storage.F, []interface{}{}) - - for _, i := range storage.List() { - attrs := i.(map[string]interface{}) - attachmentInfo, err := storageClient.CreateStorageAttachment( - attrs["index"].(int), - name, - attrs["volume"].(string)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Waiting for storage attachment %#v to come online", attachmentInfo) - storageClient.WaitForStorageAttachmentCreated(attachmentInfo.Name, meta.(*OPCClient).MaxRetryTimeout) - log.Printf("[DEBUG] Storage attachment %s: %s-%s created", - attachmentInfo.Name, attachmentInfo.InstanceName, attachmentInfo.StorageVolumeName) - attrs["name"] = attachmentInfo.Name - updatedStorage.Add(attrs) - } - - d.Set("storage", updatedStorage) - return nil -} - -func getSSHKeys(d *schema.ResourceData) []string { - sshKeys := []string{} - for _, i := range d.Get("sshKeys").([]interface{}) { - sshKeys = append(sshKeys, i.(string)) - } - return sshKeys -} - -func 
getBootOrder(d *schema.ResourceData) []int { - bootOrder := []int{} - for _, i := range d.Get("bootOrder").([]interface{}) { - bootOrder = append(bootOrder, i.(int)) - } - return bootOrder -} - -func getStorageAttachments(d *schema.ResourceData) []compute.LaunchPlanStorageAttachmentSpec { - storageAttachments := []compute.LaunchPlanStorageAttachmentSpec{} - storage := d.Get("storage").(*schema.Set) - for _, i := range storage.List() { - attrs := i.(map[string]interface{}) - storageAttachments = append(storageAttachments, compute.LaunchPlanStorageAttachmentSpec{ - Index: attrs["index"].(int), - Volume: attrs["volume"].(string), - }) - } - return storageAttachments -} - -func updateInstanceResourceData(d *schema.ResourceData, info *compute.InstanceInfo) error { - d.Set("name", info.Name) - d.Set("opcId", info.ID) - d.Set("imageList", info.ImageList) - d.Set("bootOrder", info.BootOrder) - d.Set("sshKeys", info.SSHKeys) - d.Set("label", info.Label) - d.Set("ip", info.IPAddress) - d.Set("vcable", info.VCableID) - - return nil -} - -func resourceInstanceRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d.State()) - client := meta.(*OPCClient).Instances() - name := d.Get("name").(string) - instanceName := &compute.InstanceName{ - Name: name, - ID: d.Get("opcId").(string), - } - - log.Printf("[DEBUG] Reading state of instance %s", instanceName) - result, err := client.GetInstance(instanceName) - if err != nil { - // Instance doesn't exist - if compute.WasNotFoundError(err) { - log.Printf("[DEBUG] Instance %s not found", instanceName) - d.SetId("") - return nil - } - return fmt.Errorf("Error reading instance %s: %s", instanceName, err) - } - - log.Printf("[DEBUG] Read state of instance %s: %#v", instanceName, result) - - attachments, err := meta.(*OPCClient).StorageAttachments().GetStorageAttachmentsForInstance(instanceName) - if err != nil { - return fmt.Errorf("Error reading storage attachments for instance %s: %s", instanceName, err) - } - updateInstanceResourceData(d, result) - updateAttachmentResourceData(d, attachments) - return nil -} - -func updateAttachmentResourceData(d *schema.ResourceData, attachments *[]compute.StorageAttachmentInfo) { - attachmentSet := schema.NewSet(d.Get("storage").(*schema.Set).F, []interface{}{}) - for _, attachment := range *attachments { - properties := map[string]interface{}{ - "index": attachment.Index, - "volume": attachment.StorageVolumeName, - "name": attachment.Name, - } - attachmentSet.Add(properties) - } - d.Set("storage", attachmentSet) -} - -func resourceInstanceDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d.State()) - client := meta.(*OPCClient).Instances() - name := d.Get("name").(string) - - instanceName := &compute.InstanceName{ - Name: name, - ID: d.Get("opcId").(string), - } - - log.Printf("[DEBUG] Deleting instance %s", instanceName) - if err := client.DeleteInstance(instanceName); err != nil { - return fmt.Errorf("Error deleting instance %s: %s", instanceName, err) - } - if err := client.WaitForInstanceDeleted(instanceName, meta.(*OPCClient).MaxRetryTimeout); err != nil { - return fmt.Errorf("Error deleting instance %s: %s", instanceName, err) - } - - for _, attachment := range d.Get("storage").(*schema.Set).List() { - name := attachment.(map[string]interface{})["name"].(string) - log.Printf("[DEBUG] Deleting storage attachment %s", name) - client.StorageAttachments().DeleteStorageAttachment(name) - 
client.StorageAttachments().WaitForStorageAttachmentDeleted(name, meta.(*OPCClient).MaxRetryTimeout) - } - - return nil -} diff --git a/builtin/providers/oracleopc/resource_instance_test.go b/builtin/providers/oracleopc/resource_instance_test.go deleted file mode 100644 index 6f386af84..000000000 --- a/builtin/providers/oracleopc/resource_instance_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/resource" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "testing" -) - -func TestAccOPCInstance_Basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: opcResourceCheck( - instanceResourceName, - testAccCheckInstanceDestroyed), - Steps: []resource.TestStep{ - { - Config: testAccInstanceBasic, - Check: resource.ComposeTestCheckFunc( - opcResourceCheck( - instanceResourceName, - testAccCheckInstanceExists), - opcResourceCheck( - keyResourceName, - testAccCheckSSHKeyExists), - ), - }, - { - Config: modifySSHKey, - Check: resource.ComposeTestCheckFunc( - opcResourceCheck( - instanceResourceName, - testAccCheckInstanceExists), - opcResourceCheck( - keyResourceName, - testAccCheckSSHKeyUpdated), - ), - }, - }, - }) -} - -func testAccCheckInstanceExists(state *OPCResourceState) error { - instanceName := getInstanceName(state) - - if _, err := state.Instances().GetInstance(instanceName); err != nil { - return fmt.Errorf("Error retrieving state of instance %s: %s", instanceName, err) - } - - return nil -} - -func testAccCheckSSHKeyExists(state *OPCResourceState) error { - keyName := state.Attributes["name"] - - if _, err := state.SSHKeys().GetSSHKey(keyName); err != nil { - return fmt.Errorf("Error retrieving state of key %s: %s", keyName, err) - } - - return nil -} - -func testAccCheckSSHKeyUpdated(state *OPCResourceState) error { - keyName := state.Attributes["name"] - info, err := state.SSHKeys().GetSSHKey(keyName) - if err != nil { - return err - } - if info.Key != updatedKey { - return fmt.Errorf("Expected key\n\t%s\nbut was\n\t%s", updatedKey, info.Key) - } - return nil -} - -func getInstanceName(rs *OPCResourceState) *compute.InstanceName { - return &compute.InstanceName{ - Name: rs.Attributes["name"], - ID: rs.Attributes["opcId"], - } -} - -func testAccCheckInstanceDestroyed(state *OPCResourceState) error { - instanceName := getInstanceName(state) - if info, err := state.Instances().GetInstance(instanceName); err == nil { - return fmt.Errorf("Instance %s still exists: %#v", instanceName, info) - } - - return nil -} - -const instanceName = "test_instance" -const keyName = "test_key" - -var instanceResourceName = fmt.Sprintf("opc_compute_instance.%s", instanceName) -var keyResourceName = fmt.Sprintf("opc_compute_ssh_key.%s", keyName) - -const originalKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIkZEr5UcMojtxhk6Zum39NOihHNXEvRWDt5WssX8TH/ghpv3D25K1pJkf+wfAi17HwEmYwPMEyEHENS443v6RZbXvzCkUWzkJzq7Zvbdqld038km31La2QUoMMp1KL5zk1nM65xCeQDVcR/h++03EScB2CuzTpAV6khMdfgOJgxm361kfrDVRwc1HQrAOpOnzkpPfwqBrYWqN1UnKvuO77Wk8z5LBe03EPNru3bLE3s3qHI9hjO0gXMiVUi0KyNxdWfDO8esqQlKavHAeePyrRA55YF8kBB5dEl4tVNOqpY/8TRnGN1mOe0LWxa8Ytz1wbyS49knsNVTel" -const updatedKey = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDHvb/2OSemgzUYLNW1/T3u33r7sZy1qbWtgVWiREH4gS5TVmDVPuvN1MFLdNqiWQA53gK8Gp24jtjNm9ftcPhicv81HVWJTB69C0sJGEfF0l4mgbemJLH3i37Mb6SdWJcGof9qHVDADPgiC8jIBVUhdiJSeq4fUJ3NQA2eUExBkRglQWairkNzPNA0mi3GL9KDGnoBnSCAXNGoKgDgIOqW0dYFP6oHyGWkF7V+/TME9aIQvmMpHjVzl7brZ/wED2t5vTJxxbgogHEmWnfs7p8EP5IsN6Vnjd0VNIt1tu3TduS8kH5npkPqZz8oIP93Ypxn0l7ZNEl9MahbhPj3gJ1YY7Cygrlt1VLC1ibBbOgIS2Lj6vGG/Yjkqs3Vw6qrmTRlsJ9c6bZO2xq0xzV11XQHvjPegBOClF6AztEe1jKU/RUFnzjIF8lUmM63fTaXuVkNERkTSE3E9XL3Uq6eqYdef7wHFFhCMSGotp3ANAb30kflysA9ID0b3o5QU2tB8OBxBicXQy11lh+u204YJuvIzeTXo+JAad5TWFlJcsUlbPFppLQdhUpoWaJouBGJV36DJb9R34i9T8Ze5tnJUQgPmMkERyPvb/+v5j3s2hs1A9WO6/MqmZd70gudsX/1bqWT898vCCOdM+CspNVY7nHVUtde7C6BrHzphr/C1YBXHw==" - -var testAccInstanceBasic = fmt.Sprintf(` -resource "opc_compute_instance" "%s" { - name = "test" - label = "test" - shape = "oc3" - imageList = "/oracle/public/oel_6.4_2GB_v1" - sshKeys = ["${opc_compute_ssh_key.test_key.name}"] - attributes = "{\"foo\": \"bar\"}" - storage = { - index = 1 - volume = "${opc_compute_storage_volume.test_volume.name}" - } -} - -resource "opc_compute_storage_volume" "test_volume" { - size = "3g" - description = "My volume" - name = "test_volume_b" - tags = ["foo", "bar", "baz"] -} - -resource "opc_compute_ssh_key" "%s" { - name = "test-key" - key = "%s" - enabled = true -} -`, instanceName, keyName, originalKey) - -var modifySSHKey = fmt.Sprintf(` -resource "opc_compute_instance" "%s" { - name = "test" - label = "test" - shape = "oc3" - imageList = "/oracle/public/oel_6.4_2GB_v1" - sshKeys = ["${opc_compute_ssh_key.test_key.name}"] - attributes = "{\"foo\": \"bar\"}" - storage = { - index = 1 - volume = "${opc_compute_storage_volume.test_volume.name}" - } -} - -resource "opc_compute_storage_volume" "test_volume" { - size = "3g" - description = "My volume" - name = "test_volume_b" - tags = ["foo", "bar", "baz"] -} - -resource "opc_compute_ssh_key" "%s" { - name = "test-key" - key = "%s" - enabled = true -} -`, instanceName, keyName, updatedKey) diff --git a/builtin/providers/oracleopc/resource_ip_association.go b/builtin/providers/oracleopc/resource_ip_association.go deleted file mode 100644 index 84df10ba8..000000000 --- a/builtin/providers/oracleopc/resource_ip_association.go +++ /dev/null @@ -1,103 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceIPAssociation() *schema.Resource { - return &schema.Resource{ - Create: resourceIPAssociationCreate, - Read: resourceIPAssociationRead, - Delete: resourceIPAssociationDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "vcable": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "parentpool": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceIPAssociationCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - vcable, parentpool := getIPAssociationResourceData(d) - - log.Printf("[DEBUG] Creating ip association between vcable %s and parent pool %s", - vcable, parentpool) - - client := meta.(*OPCClient).IPAssociations() - info, err := client.CreateIPAssociation(vcable, parentpool) - if err != nil { - return fmt.Errorf("Error creating ip association between vcable %s and parent pool %s: %s", - vcable, parentpool, err) - } - - 
d.SetId(info.Name)
-	updateIPAssociationResourceData(d, info)
-	return nil
-}
-
-func updateIPAssociationResourceData(d *schema.ResourceData, info *compute.IPAssociationInfo) {
-	d.Set("name", info.Name)
-	d.Set("parentpool", info.ParentPool)
-	d.Set("vcable", info.VCable)
-}
-
-func resourceIPAssociationRead(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-	client := meta.(*OPCClient).IPAssociations()
-	name := d.Get("name").(string)
-
-	log.Printf("[DEBUG] Reading state of ip association %s", name)
-	result, err := client.GetIPAssociation(name)
-	if err != nil {
-		// IP Association does not exist
-		if compute.WasNotFoundError(err) {
-			d.SetId("")
-			return nil
-		}
-		return fmt.Errorf("Error reading ip association %s: %s", name, err)
-	}
-
-	log.Printf("[DEBUG] Read state of ip association %s: %#v", name, result)
-	updateIPAssociationResourceData(d, result)
-	return nil
-}
-
-func getIPAssociationResourceData(d *schema.ResourceData) (string, string) {
-	return d.Get("vcable").(string), d.Get("parentpool").(string)
-}
-
-func resourceIPAssociationDelete(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-	client := meta.(*OPCClient).IPAssociations()
-	name := d.Get("name").(string)
-
-	vcable, parentpool := getIPAssociationResourceData(d)
-	log.Printf("[DEBUG] Deleting ip association %s between vcable %s and parent pool %s",
-		name, vcable, parentpool)
-
-	if err := client.DeleteIPAssociation(name); err != nil {
-		return fmt.Errorf("Error deleting ip association %s between vcable %s and parent pool %s: %s",
-			name, vcable, parentpool, err)
-	}
-	return nil
-}
diff --git a/builtin/providers/oracleopc/resource_ip_association_test.go b/builtin/providers/oracleopc/resource_ip_association_test.go
deleted file mode 100644
index 44f48474f..000000000
--- a/builtin/providers/oracleopc/resource_ip_association_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package opc
-
-import (
-	"fmt"
-	"github.com/hashicorp/terraform/helper/resource"
-	"testing"
-)
-
-func TestAccOPCResourceIPAssociation_Basic(t *testing.T) {
-
-	resource.Test(t, resource.TestCase{
-		PreCheck:  func() { testAccPreCheck(t) },
-		Providers: testAccProviders,
-		CheckDestroy: opcResourceCheck(
-			ipAssociationResourceName,
-			testAccCheckIPAssociationDestroyed),
-		Steps: []resource.TestStep{
-			{
-				Config: testAccIPAssociationBasic,
-				Check: resource.ComposeTestCheckFunc(
-					opcResourceCheck(
-						ipAssociationResourceName,
-						testAccCheckIPAssociationExists),
-				),
-			},
-		},
-	})
-}
-
-func testAccCheckIPAssociationExists(state *OPCResourceState) error {
-	associationName := getIPAssociationName(state)
-
-	if _, err := state.IPAssociations().GetIPAssociation(associationName); err != nil {
-		return fmt.Errorf("Error retrieving state of ip association %s: %s", associationName, err)
-	}
-
-	return nil
-}
-
-func getIPAssociationName(rs *OPCResourceState) string {
-	return rs.Attributes["name"]
-}
-
-func testAccCheckIPAssociationDestroyed(state *OPCResourceState) error {
-	associationName := getIPAssociationName(state)
-	if info, err := state.IPAssociations().GetIPAssociation(associationName); err == nil {
-		return fmt.Errorf("IP association %s still exists: %#v", associationName, info)
-	}
-
-	return nil
-}
-
-const ipAssociationName = "test_ip_association"
-
-var ipAssociationResourceName = fmt.Sprintf("opc_compute_ip_association.%s", ipAssociationName)
-
-var testAccIPAssociationBasic = fmt.Sprintf(`
-resource "opc_compute_ip_reservation" 
"reservation1" { - parentpool = "/oracle/public/ippool" - permanent = true -} - -resource "opc_compute_ip_association" "%s" { - vcable = "${opc_compute_instance.test-instance1.vcable}" - parentpool = "ipreservation:${opc_compute_ip_reservation.reservation1.name}" -} - -resource "opc_compute_instance" "test-instance1" { - name = "test" - label = "test" - shape = "oc3" - imageList = "/oracle/public/oel_6.4_2GB_v1" -} -`, ipAssociationName) diff --git a/builtin/providers/oracleopc/resource_ip_reservation.go b/builtin/providers/oracleopc/resource_ip_reservation.go deleted file mode 100644 index fa25679d2..000000000 --- a/builtin/providers/oracleopc/resource_ip_reservation.go +++ /dev/null @@ -1,122 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceIPReservation() *schema.Resource { - return &schema.Resource{ - Create: resourceIPReservationCreate, - Read: resourceIPReservationRead, - Delete: resourceIPReservationDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "permanent": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - ForceNew: true, - }, - - "parentpool": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "tags": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "ip": &schema.Schema{ - Type: schema.TypeString, - Optional: false, - Computed: true, - }, - }, - } -} - -func resourceIPReservationCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - parentpool, permanent, tags := getIPReservationResourceData(d) - - log.Printf("[DEBUG] Creating ip reservation from parentpool %s with tags=%s", - parentpool, tags) - - client := meta.(*OPCClient).IPReservations() - info, err := client.CreateIPReservation(parentpool, permanent, tags) - if err != nil { - return fmt.Errorf("Error creating ip reservation from parentpool %s with tags=%s: %s", - parentpool, tags, err) - } - - d.SetId(info.Name) - updateIPReservationResourceData(d, info) - return nil -} - -func updateIPReservationResourceData(d *schema.ResourceData, info *compute.IPReservationInfo) { - d.Set("name", info.Name) - d.Set("parentpool", info.ParentPool) - d.Set("permanent", info.Permanent) - d.Set("tags", info.Tags) - d.Set("ip", info.IP) -} - -func resourceIPReservationRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).IPReservations() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of ip reservation %s", name) - result, err := client.GetIPReservation(name) - if err != nil { - // IP Reservation does not exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading ip reservation %s: %s", name, err) - } - - log.Printf("[DEBUG] Read state of ip reservation %s: %#v", name, result) - updateIPReservationResourceData(d, result) - return nil -} - -func getIPReservationResourceData(d *schema.ResourceData) (string, bool, []string) { - tagdata := d.Get("tags").([]interface{}) - tags := make([]string, len(tagdata)) - for i, tag := range tagdata { - tags[i] = tag.(string) - } - return d.Get("parentpool").(string), - d.Get("permanent").(bool), - tags -} - -func 
resourceIPReservationDelete(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-	client := meta.(*OPCClient).IPReservations()
-	name := d.Get("name").(string)
-
-	log.Printf("[DEBUG] Deleting ip reservation %s", name)
-
-	if err := client.DeleteIPReservation(name); err != nil {
-		return fmt.Errorf("Error deleting ip reservation %s: %s", name, err)
-	}
-	return nil
-}
diff --git a/builtin/providers/oracleopc/resource_security_application.go b/builtin/providers/oracleopc/resource_security_application.go
deleted file mode 100644
index b7205754c..000000000
--- a/builtin/providers/oracleopc/resource_security_application.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package opc
-
-import (
-	"fmt"
-	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/oracle/terraform-provider-compute/sdk/compute"
-	"log"
-)
-
-func resourceSecurityApplication() *schema.Resource {
-	return &schema.Resource{
-		Create: resourceSecurityApplicationCreate,
-		Read:   resourceSecurityApplicationRead,
-		Delete: resourceSecurityApplicationDelete,
-
-		Schema: map[string]*schema.Schema{
-			"name": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: true,
-			},
-
-			"protocol": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: true,
-			},
-
-			"dport": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: true,
-			},
-
-			"icmptype": &schema.Schema{
-				Type:     schema.TypeString,
-				Optional: true,
-				ForceNew: true,
-			},
-
-			"icmpcode": &schema.Schema{
-				Type:     schema.TypeString,
-				Optional: true,
-				ForceNew: true,
-			},
-
-			"description": &schema.Schema{
-				Type:     schema.TypeString,
-				Optional: true,
-				ForceNew: true,
-			},
-		},
-	}
-}
-
-func resourceSecurityApplicationCreate(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-
-	name, protocol, dport, icmptype, icmpcode, description := getSecurityApplicationResourceData(d)
-
-	log.Printf("[DEBUG] Creating security application %s", name)
-
-	client := meta.(*OPCClient).SecurityApplications()
-	info, err := client.CreateSecurityApplication(name, protocol, dport, icmptype, icmpcode, description)
-	if err != nil {
-		return fmt.Errorf("Error creating security application %s: %s", name, err)
-	}
-
-	d.SetId(info.Name)
-	updateSecurityApplicationResourceData(d, info)
-	return nil
-}
-
-func updateSecurityApplicationResourceData(d *schema.ResourceData, info *compute.SecurityApplicationInfo) {
-	d.Set("name", info.Name)
-	d.Set("protocol", info.Protocol)
-	d.Set("dport", info.DPort)
-	d.Set("icmptype", info.ICMPType)
-	d.Set("icmpcode", info.ICMPCode)
-	d.Set("description", info.Description)
-}
-
-func resourceSecurityApplicationRead(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-	client := meta.(*OPCClient).SecurityApplications()
-	name := d.Get("name").(string)
-
-	log.Printf("[DEBUG] Reading state of security application %s", name)
-	result, err := client.GetSecurityApplication(name)
-	if err != nil {
-		// Security Application does not exist
-		if compute.WasNotFoundError(err) {
-			d.SetId("")
-			return nil
-		}
-		return fmt.Errorf("Error reading security application %s: %s", name, err)
-	}
-
-	log.Printf("[DEBUG] Read state of security application %s: %#v", name, result)
-	updateSecurityApplicationResourceData(d, result)
-	return nil
-}
-
-func getSecurityApplicationResourceData(d *schema.ResourceData) (string, string, string, string, string, string) {
-	return 
d.Get("name").(string), - d.Get("protocol").(string), - d.Get("dport").(string), - d.Get("icmptype").(string), - d.Get("icmpcode").(string), - d.Get("description").(string) -} - -func resourceSecurityApplicationDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityApplications() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Deleting security application %s", name) - - if err := client.DeleteSecurityApplication(name); err != nil { - return fmt.Errorf("Error deleting security application %s: %s", name, err) - } - return nil -} diff --git a/builtin/providers/oracleopc/resource_security_association.go b/builtin/providers/oracleopc/resource_security_association.go deleted file mode 100644 index 15a912657..000000000 --- a/builtin/providers/oracleopc/resource_security_association.go +++ /dev/null @@ -1,103 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceSecurityAssociation() *schema.Resource { - return &schema.Resource{ - Create: resourceSecurityAssociationCreate, - Read: resourceSecurityAssociationRead, - Delete: resourceSecurityAssociationDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "vcable": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "seclist": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceSecurityAssociationCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - vcable, seclist := getSecurityAssociationResourceData(d) - - log.Printf("[DEBUG] Creating security association between vcable %s and security list %s", - vcable, seclist) - - client := meta.(*OPCClient).SecurityAssociations() - info, err := client.CreateSecurityAssociation(vcable, seclist) - if err != nil { - return fmt.Errorf("Error creating security association between vcable %s and security list %s: %s", - vcable, seclist, err) - } - - d.SetId(info.Name) - updateSecurityAssociationResourceData(d, info) - return nil -} - -func updateSecurityAssociationResourceData(d *schema.ResourceData, info *compute.SecurityAssociationInfo) { - d.Set("name", info.Name) - d.Set("seclist", info.SecList) - d.Set("vcable", info.VCable) -} - -func resourceSecurityAssociationRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityAssociations() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of security association %s", name) - result, err := client.GetSecurityAssociation(name) - if err != nil { - // Security Association does not exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading security association %s: %s", name, err) - } - - log.Printf("[DEBUG] Read state of security association %s: %#v", name, result) - updateSecurityAssociationResourceData(d, result) - return nil -} - -func getSecurityAssociationResourceData(d *schema.ResourceData) (string, string) { - return d.Get("vcable").(string), d.Get("seclist").(string) -} - -func resourceSecurityAssociationDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := 
meta.(*OPCClient).SecurityAssociations()
-	name := d.Get("name").(string)
-
-	vcable, seclist := getSecurityAssociationResourceData(d)
-	log.Printf("[DEBUG] Deleting security association %s between vcable %s and security list %s",
-		name, vcable, seclist)
-
-	if err := client.DeleteSecurityAssociation(name); err != nil {
-		return fmt.Errorf("Error deleting security association %s between vcable %s and security list %s: %s",
-			name, vcable, seclist, err)
-	}
-	return nil
-}
diff --git a/builtin/providers/oracleopc/resource_security_association_test.go b/builtin/providers/oracleopc/resource_security_association_test.go
deleted file mode 100644
index 604ef64cb..000000000
--- a/builtin/providers/oracleopc/resource_security_association_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package opc
-
-import (
-	"fmt"
-	"github.com/hashicorp/terraform/helper/resource"
-	"testing"
-)
-
-func TestAccOPCResourceSecurityAssociation_Basic(t *testing.T) {
-
-	resource.Test(t, resource.TestCase{
-		PreCheck:  func() { testAccPreCheck(t) },
-		Providers: testAccProviders,
-		CheckDestroy: opcResourceCheck(
-			associationResourceName,
-			testAccCheckAssociationDestroyed),
-		Steps: []resource.TestStep{
-			{
-				Config: testAccSecurityAssociationBasic,
-				Check: resource.ComposeTestCheckFunc(
-					opcResourceCheck(
-						associationResourceName,
-						testAccCheckAssociationExists),
-				),
-			},
-		},
-	})
-}
-
-func testAccCheckAssociationExists(state *OPCResourceState) error {
-	associationName := getAssociationName(state)
-
-	if _, err := state.SecurityAssociations().GetSecurityAssociation(associationName); err != nil {
-		return fmt.Errorf("Error retrieving state of security association %s: %s", associationName, err)
-	}
-
-	return nil
-}
-
-func getAssociationName(rs *OPCResourceState) string {
-	return rs.Attributes["name"]
-}
-
-func testAccCheckAssociationDestroyed(state *OPCResourceState) error {
-	associationName := getAssociationName(state)
-	if info, err := state.SecurityAssociations().GetSecurityAssociation(associationName); err == nil {
-		return fmt.Errorf("Association %s still exists: %#v", associationName, info)
-	}
-
-	return nil
-}
-
-const associationName = "test_rule"
-
-var associationResourceName = fmt.Sprintf("opc_compute_security_association.%s", associationName)
-
-var testAccSecurityAssociationBasic = fmt.Sprintf(`
-resource "opc_compute_security_list" "sec-list1" {
-	name = "sec-list-1"
-	policy = "PERMIT"
-	outbound_cidr_policy = "DENY"
-}
-
-resource "opc_compute_security_association" "%s" {
-	vcable = "${opc_compute_instance.test-instance1.vcable}"
-	seclist = "${opc_compute_security_list.sec-list1.name}"
-}
-
-resource "opc_compute_instance" "test-instance1" {
-	name = "test"
-	label = "test"
-	shape = "oc3"
-	imageList = "/oracle/public/oel_6.4_2GB_v1"
-}
-`, associationName)
diff --git a/builtin/providers/oracleopc/resource_security_ip_list.go b/builtin/providers/oracleopc/resource_security_ip_list.go
deleted file mode 100644
index 6a3e66b28..000000000
--- a/builtin/providers/oracleopc/resource_security_ip_list.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package opc
-
-import (
-	"fmt"
-	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/oracle/terraform-provider-compute/sdk/compute"
-	"log"
-)
-
-func resourceSecurityIPList() *schema.Resource {
-	return &schema.Resource{
-		Create: resourceSecurityIPListCreate,
-		Read:   resourceSecurityIPListRead,
-		Update: resourceSecurityIPListUpdate,
-		Delete: resourceSecurityIPListDelete,
-
-		Schema: map[string]*schema.Schema{
-			"name": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: true,
-			},
-
-			"ip_entries": &schema.Schema{
-				Type:     schema.TypeList,
-				Required: true,
-				ForceNew: false,
-				Elem:     &schema.Schema{Type: schema.TypeString},
-			},
-		},
-	}
-}
-
-func resourceSecurityIPListCreate(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-
-	name, ipEntries := getSecurityIPListResourceData(d)
-
-	log.Printf("[DEBUG] Creating security IP list with name %s, entries %s",
-		name, ipEntries)
-
-	client := meta.(*OPCClient).SecurityIPLists()
-	info, err := client.CreateSecurityIPList(name, ipEntries)
-	if err != nil {
-		return fmt.Errorf("Error creating security IP list %s: %s", name, err)
-	}
-
-	d.SetId(info.Name)
-	updateSecurityIPListResourceData(d, info)
-	return nil
-}
-
-func updateSecurityIPListResourceData(d *schema.ResourceData, info *compute.SecurityIPListInfo) {
-	d.Set("name", info.Name)
-	d.Set("ip_entries", info.SecIPEntries)
-}
-
-func resourceSecurityIPListRead(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-	client := meta.(*OPCClient).SecurityIPLists()
-	name := d.Get("name").(string)
-
-	log.Printf("[DEBUG] Reading state of security IP list %s", name)
-	result, err := client.GetSecurityIPList(name)
-	if err != nil {
-		// Security IP List does not exist
-		if compute.WasNotFoundError(err) {
-			d.SetId("")
-			return nil
-		}
-		return fmt.Errorf("Error reading security IP list %s: %s", name, err)
-	}
-
-	log.Printf("[DEBUG] Read state of security IP list %s: %#v", name, result)
-	updateSecurityIPListResourceData(d, result)
-	return nil
-}
-
-func getSecurityIPListResourceData(d *schema.ResourceData) (string, []string) {
-	name := d.Get("name").(string)
-	ipEntries := d.Get("ip_entries").([]interface{})
-	ipEntryStrings := []string{}
-	for _, entry := range ipEntries {
-		ipEntryStrings = append(ipEntryStrings, entry.(string))
-	}
-	return name, ipEntryStrings
-}
-
-func resourceSecurityIPListUpdate(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-
-	client := meta.(*OPCClient).SecurityIPLists()
-	name, entries := getSecurityIPListResourceData(d)
-
-	log.Printf("[DEBUG] Updating security IP list %s with ip entries %s",
-		name, entries)
-
-	info, err := client.UpdateSecurityIPList(name, entries)
-	if err != nil {
-		return fmt.Errorf("Error updating security IP list %s: %s", name, err)
-	}
-
-	updateSecurityIPListResourceData(d, info)
-	return nil
-}
-
-func resourceSecurityIPListDelete(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-	client := meta.(*OPCClient).SecurityIPLists()
-	name := d.Get("name").(string)
-
-	log.Printf("[DEBUG] Deleting security IP list %s", name)
-	if err := client.DeleteSecurityIPList(name); err != nil {
-		return fmt.Errorf("Error deleting security IP list %s: %s", name, err)
-	}
-	return nil
-}
diff --git a/builtin/providers/oracleopc/resource_security_list.go b/builtin/providers/oracleopc/resource_security_list.go
deleted file mode 100644
index eea11bbb1..000000000
--- a/builtin/providers/oracleopc/resource_security_list.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package opc
-
-import (
-	"fmt"
-	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/oracle/terraform-provider-compute/sdk/compute"
-	"log"
-)
-
-func resourceSecurityList() *schema.Resource {
-	return &schema.Resource{
-		Create: resourceSecurityListCreate,
-		Read:   resourceSecurityListRead,
-		Update: 
resourceSecurityListUpdate,
-		Delete: resourceSecurityListDelete,
-
-		Schema: map[string]*schema.Schema{
-			"name": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: true,
-			},
-
-			"policy": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: false,
-			},
-
-			"outbound_cidr_policy": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: false,
-			},
-		},
-	}
-}
-
-func resourceSecurityListCreate(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-
-	name, policy, outboundCIDRPolicy := getSecurityListResourceData(d)
-
-	log.Printf("[DEBUG] Creating security list with name %s, policy %s, outbound CIDR policy %s",
-		name, policy, outboundCIDRPolicy)
-
-	client := meta.(*OPCClient).SecurityLists()
-	info, err := client.CreateSecurityList(name, policy, outboundCIDRPolicy)
-	if err != nil {
-		return fmt.Errorf("Error creating security list %s: %s", name, err)
-	}
-
-	d.SetId(info.Name)
-	updateSecurityListResourceData(d, info)
-	return nil
-}
-
-func updateSecurityListResourceData(d *schema.ResourceData, info *compute.SecurityListInfo) {
-	d.Set("name", info.Name)
-	d.Set("policy", info.Policy)
-	d.Set("outbound_cidr_policy", info.OutboundCIDRPolicy)
-}
-
-func resourceSecurityListRead(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-	client := meta.(*OPCClient).SecurityLists()
-	name := d.Get("name").(string)
-
-	log.Printf("[DEBUG] Reading state of security list %s", name)
-	result, err := client.GetSecurityList(name)
-	if err != nil {
-		// Security List does not exist
-		if compute.WasNotFoundError(err) {
-			d.SetId("")
-			return nil
-		}
-		return fmt.Errorf("Error reading security list %s: %s", name, err)
-	}
-
-	log.Printf("[DEBUG] Read state of security list %s: %#v", name, result)
-	updateSecurityListResourceData(d, result)
-	return nil
-}
-
-func getSecurityListResourceData(d *schema.ResourceData) (string, string, string) {
-	return d.Get("name").(string),
-		d.Get("policy").(string),
-		d.Get("outbound_cidr_policy").(string)
-}
-
-func resourceSecurityListUpdate(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-
-	client := meta.(*OPCClient).SecurityLists()
-	name, policy, outboundCIDRPolicy := getSecurityListResourceData(d)
-
-	log.Printf("[DEBUG] Updating security list %s with policy %s, outbound_cidr_policy %s",
-		name, policy, outboundCIDRPolicy)
-
-	info, err := client.UpdateSecurityList(name, policy, outboundCIDRPolicy)
-	if err != nil {
-		return fmt.Errorf("Error updating security list %s: %s", name, err)
-	}
-
-	updateSecurityListResourceData(d, info)
-	return nil
-}
-
-func resourceSecurityListDelete(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-	client := meta.(*OPCClient).SecurityLists()
-	name := d.Get("name").(string)
-
-	log.Printf("[DEBUG] Deleting security list %s", name)
-	if err := client.DeleteSecurityList(name); err != nil {
-		return fmt.Errorf("Error deleting security list %s: %s", name, err)
-	}
-	return nil
-}
diff --git a/builtin/providers/oracleopc/resource_security_rule.go b/builtin/providers/oracleopc/resource_security_rule.go
deleted file mode 100644
index 0d9eb562c..000000000
--- a/builtin/providers/oracleopc/resource_security_rule.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package opc
-
-import (
-	"fmt"
-	"github.com/hashicorp/terraform/helper/schema"
-	
"github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceSecurityRule() *schema.Resource { - return &schema.Resource{ - Create: resourceSecurityRuleCreate, - Read: resourceSecurityRuleRead, - Update: resourceSecurityRuleUpdate, - Delete: resourceSecurityRuleDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "source_list": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "destination_list": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "application": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "action": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - - "disabled": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - ForceNew: false, - }, - }, - } -} - -func resourceSecurityRuleCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - name, sourceList, destinationList, application, action, disabled := getSecurityRuleResourceData(d) - - log.Printf("[DEBUG] Creating security list with name %s, sourceList %s, destinationList %s, application %s, action %s, disabled %s", - name, sourceList, destinationList, application, action, disabled) - - client := meta.(*OPCClient).SecurityRules() - info, err := client.CreateSecurityRule(name, sourceList, destinationList, application, action, disabled) - if err != nil { - return fmt.Errorf("Error creating security rule %s: %s", name, err) - } - - d.SetId(info.Name) - updateSecurityRuleResourceData(d, info) - return nil -} - -func updateSecurityRuleResourceData(d *schema.ResourceData, info *compute.SecurityRuleInfo) { - d.Set("name", info.Name) - d.Set("source_list", info.SourceList) - d.Set("destination_list", info.DestinationList) - d.Set("application", info.Application) - d.Set("action", info.Action) - d.Set("disabled", info.Disabled) -} - -func resourceSecurityRuleRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityRules() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of security rule %s", name) - result, err := client.GetSecurityRule(name) - if err != nil { - // Security Rule does not exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading security list %s: %s", name, err) - } - - log.Printf("[DEBUG] Read state of ssh key %s: %#v", name, result) - updateSecurityRuleResourceData(d, result) - return nil -} - -func getSecurityRuleResourceData(d *schema.ResourceData) (string, string, string, string, string, bool) { - return d.Get("name").(string), - d.Get("source_list").(string), - d.Get("destination_list").(string), - d.Get("application").(string), - d.Get("action").(string), - d.Get("disabled").(bool) -} - -func resourceSecurityRuleUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - client := meta.(*OPCClient).SecurityRules() - name, sourceList, destinationList, application, action, disabled := getSecurityRuleResourceData(d) - - log.Printf("[DEBUG] Updating security list %s with sourceList %s, destinationList %s, application %s, action %s, disabled %s", - name, sourceList, destinationList, application, action, disabled) - - info, err := client.UpdateSecurityRule(name, 
sourceList, destinationList, application, action, disabled)
-	if err != nil {
-		return fmt.Errorf("Error updating security rule %s: %s", name, err)
-	}
-
-	updateSecurityRuleResourceData(d, info)
-	return nil
-}
-
-func resourceSecurityRuleDelete(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource state: %#v", d.State())
-	client := meta.(*OPCClient).SecurityRules()
-	name := d.Get("name").(string)
-
-	log.Printf("[DEBUG] Deleting security rule %s", name)
-	if err := client.DeleteSecurityRule(name); err != nil {
-		return fmt.Errorf("Error deleting security rule %s: %s", name, err)
-	}
-	return nil
-}
diff --git a/builtin/providers/oracleopc/resource_security_rule_test.go b/builtin/providers/oracleopc/resource_security_rule_test.go
deleted file mode 100644
index f09c2b879..000000000
--- a/builtin/providers/oracleopc/resource_security_rule_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package opc
-
-import (
-	"fmt"
-	"github.com/hashicorp/terraform/helper/resource"
-	"testing"
-)
-
-func TestAccOPCResourceSecurityRule_Basic(t *testing.T) {
-
-	resource.Test(t, resource.TestCase{
-		PreCheck:  func() { testAccPreCheck(t) },
-		Providers: testAccProviders,
-		CheckDestroy: opcResourceCheck(
-			ruleResourceName,
-			testAccCheckRuleDestroyed),
-		Steps: []resource.TestStep{
-			{
-				Config: testAccSecurityRuleBasic,
-				Check: resource.ComposeTestCheckFunc(
-					opcResourceCheck(
-						ruleResourceName,
-						testAccCheckRuleExists),
-				),
-			},
-		},
-	})
-}
-
-func testAccCheckRuleExists(state *OPCResourceState) error {
-	ruleName := getRuleName(state)
-
-	if _, err := state.SecurityRules().GetSecurityRule(ruleName); err != nil {
-		return fmt.Errorf("Error retrieving state of security rule %s: %s", ruleName, err)
-	}
-
-	return nil
-}
-
-func getRuleName(rs *OPCResourceState) string {
-	return rs.Attributes["name"]
-}
-
-func testAccCheckRuleDestroyed(state *OPCResourceState) error {
-	ruleName := getRuleName(state)
-	if info, err := state.SecurityRules().GetSecurityRule(ruleName); err == nil {
-		return fmt.Errorf("Rule %s still exists: %#v", ruleName, info)
-	}
-
-	return nil
-}
-
-const ruleName = "test_rule"
-const secListName = "sec-list1"
-const secIpListName = "sec-ip-list1"
-
-var ruleResourceName = fmt.Sprintf("opc_compute_security_rule.%s", ruleName)
-
-var testAccSecurityRuleBasic = fmt.Sprintf(`
-resource "opc_compute_security_rule" "%s" {
-	name = "test"
-	source_list = "seclist:${opc_compute_security_list.sec-list1.name}"
-	destination_list = "seciplist:${opc_compute_security_ip_list.sec-ip-list1.name}"
-	action = "PERMIT"
-	application = "${opc_compute_security_application.spring-boot.name}"
-	disabled = false
-}
-
-resource "opc_compute_security_list" "%s" {
-	name = "sec-list-1"
-	policy = "PERMIT"
-	outbound_cidr_policy = "DENY"
-}
-
-resource "opc_compute_security_application" "spring-boot" {
-	name = "spring-boot"
-	protocol = "tcp"
-	dport = "8080"
-}
-
-resource "opc_compute_security_ip_list" "%s" {
-	name = "sec-ip-list1"
-	ip_entries = ["217.138.34.4"]
-}
-`, ruleName, secListName, secIpListName)
diff --git a/builtin/providers/oracleopc/resource_ssh_key.go b/builtin/providers/oracleopc/resource_ssh_key.go
deleted file mode 100644
index 29f68b4aa..000000000
--- a/builtin/providers/oracleopc/resource_ssh_key.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package opc
-
-import (
-	"fmt"
-	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/oracle/terraform-provider-compute/sdk/compute"
-	"log"
-)
-
-func resourceSSHKey() *schema.Resource {
-	return &schema.Resource{
-		Create: 
resourceSSHKeyCreate,
-		Read:   resourceSSHKeyRead,
-		Update: resourceSSHKeyUpdate,
-		Delete: resourceSSHKeyDelete,
-
-		Schema: map[string]*schema.Schema{
-			"name": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: true,
-			},
-
-			"key": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: false,
-			},
-
-			"enabled": &schema.Schema{
-				Type:     schema.TypeBool,
-				Required: true,
-				ForceNew: false,
-			},
-		},
-	}
-}
-
-func resourceSSHKeyCreate(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource data: %#v", d)
-
-	client := meta.(*OPCClient).SSHKeys()
-	name := d.Get("name").(string)
-	key := d.Get("key").(string)
-	enabled := d.Get("enabled").(bool)
-
-	log.Printf("[DEBUG] Creating ssh key with name %s, key %s, enabled %t",
-		name, key, enabled)
-
-	info, err := client.CreateSSHKey(name, key, enabled)
-	if err != nil {
-		return fmt.Errorf("Error creating ssh key %s: %s", name, err)
-	}
-
-	d.SetId(info.Name)
-	updateSSHKeyResourceData(d, info)
-	return nil
-}
-
-func updateSSHKeyResourceData(d *schema.ResourceData, info *compute.SSHKeyInfo) {
-	d.Set("name", info.Name)
-	d.Set("key", info.Key)
-	d.Set("enabled", info.Enabled)
-}
-
-func resourceSSHKeyRead(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource data: %#v", d)
-	client := meta.(*OPCClient).SSHKeys()
-	name := d.Get("name").(string)
-
-	log.Printf("[DEBUG] Reading state of ssh key %s", name)
-	result, err := client.GetSSHKey(name)
-	if err != nil {
-		// SSH Key does not exist
-		if compute.WasNotFoundError(err) {
-			d.SetId("")
-			return nil
-		}
-		return fmt.Errorf("Error reading ssh key %s: %s", name, err)
-	}
-
-	log.Printf("[DEBUG] Read state of ssh key %s: %#v", name, result)
-	updateSSHKeyResourceData(d, result)
-	return nil
-}
-
-func resourceSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource data: %#v", d)
-
-	client := meta.(*OPCClient).SSHKeys()
-	name := d.Get("name").(string)
-	key := d.Get("key").(string)
-	enabled := d.Get("enabled").(bool)
-
-	log.Printf("[DEBUG] Updating ssh key with name %s, key %s, enabled %t",
-		name, key, enabled)
-
-	info, err := client.UpdateSSHKey(name, key, enabled)
-	if err != nil {
-		return fmt.Errorf("Error updating ssh key %s: %s", name, err)
-	}
-
-	updateSSHKeyResourceData(d, info)
-	return nil
-}
-
-func resourceSSHKeyDelete(d *schema.ResourceData, meta interface{}) error {
-	log.Printf("[DEBUG] Resource data: %#v", d)
-	client := meta.(*OPCClient).SSHKeys()
-	name := d.Get("name").(string)
-
-	log.Printf("[DEBUG] Deleting ssh key %s", name)
-	if err := client.DeleteSSHKey(name); err != nil {
-		return fmt.Errorf("Error deleting ssh key %s: %s", name, err)
-	}
-	return nil
-}
diff --git a/builtin/providers/oracleopc/resource_storage_volume.go b/builtin/providers/oracleopc/resource_storage_volume.go
deleted file mode 100644
index 2d80d09f2..000000000
--- a/builtin/providers/oracleopc/resource_storage_volume.go
+++ /dev/null
@@ -1,301 +0,0 @@
-package opc
-
-import (
-	"fmt"
-	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/oracle/terraform-provider-compute/sdk/compute"
-	"log"
-)
-
-func resourceStorageVolume() *schema.Resource {
-	return &schema.Resource{
-		Create: resourceStorageVolumeCreate,
-		Read:   resourceStorageVolumeRead,
-		Update: resourceStorageVolumeUpdate,
-		Delete: resourceStorageVolumeDelete,
-
-		Schema: map[string]*schema.Schema{
-			"name": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: true,
-			},
- - "size": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "sizeInBytes": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - - "storage": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "/oracle/public/storage/default", - }, - - "tags": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "bootableImage": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "bootableImageVersion": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: -1, - }, - - "snapshot": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "account": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, - }, - - "snapshotId": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceStorageVolumeCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d) - - sv := meta.(*OPCClient).StorageVolumes() - name := d.Get("name").(string) - properties := []string{d.Get("storage").(string)} - - spec := sv.NewStorageVolumeSpec( - d.Get("size").(string), - properties, - name) - - if d.Get("description").(string) != "" { - spec.SetDescription(d.Get("description").(string)) - } - - spec.SetTags(getTags(d)) - - if d.Get("bootableImage") != "" { - spec.SetBootableImage(d.Get("bootableImage").(string), d.Get("bootableImageVersion").(int)) - } - - if len(d.Get("snapshot").(*schema.Set).List()) > 0 { - snapshotDetails := d.Get("snapshot").(*schema.Set).List()[0].(map[string]interface{}) - spec.SetSnapshot( - snapshotDetails["name"].(string), - snapshotDetails["account"].(string), - ) - } - - if d.Get("snapshotId") != "" { - spec.SetSnapshotID(d.Get("snapshotId").(string)) - } - - log.Printf("[DEBUG] Creating storage volume %s with spec %#v", name, spec) - err := sv.CreateStorageVolume(spec) - if err != nil { - return fmt.Errorf("Error creating storage volume %s: %s", name, err) - } - - log.Printf("[DEBUG] Waiting for storage volume %s to come online", name) - info, err := sv.WaitForStorageVolumeOnline(name, meta.(*OPCClient).MaxRetryTimeout) - if err != nil { - return fmt.Errorf("Error waiting for storage volume %s to come online: %s", name, err) - } - - log.Printf("[DEBUG] Created storage volume %s: %#v", name, info) - - cachedAttachments, attachmentsFound := meta.(*OPCClient).storageAttachmentsByVolumeCache[name] - if attachmentsFound { - log.Printf("[DEBUG] Rebuilding storage attachments for volume %s", name) - for _, cachedAttachment := range cachedAttachments { - log.Printf("[DEBUG] Rebuilding storage attachments between volume %s and instance %s", - name, - cachedAttachment.instanceName) - - attachmentInfo, err := meta.(*OPCClient).StorageAttachments().CreateStorageAttachment( - cachedAttachment.index, - cachedAttachment.instanceName, - name, - ) - - if err != nil { - return fmt.Errorf( - "Error recreating storage attachment between volume %s and instance %s: %s", - name, - *cachedAttachment.instanceName, - err) - } - err = 
meta.(*OPCClient).StorageAttachments().WaitForStorageAttachmentCreated(
-				attachmentInfo.Name,
-				meta.(*OPCClient).MaxRetryTimeout)
-			if err != nil {
-				return fmt.Errorf(
-					"Error recreating storage attachment between volume %s and instance %s: %s",
-					name,
-					*cachedAttachment.instanceName,
-					err)
-			}
-		}
-		meta.(*OPCClient).storageAttachmentsByVolumeCache[name] = nil
-	}
-
-	d.SetId(name)
-	updateResourceData(d, info)
-	return nil
-}
-
-func getTags(d *schema.ResourceData) []string {
-	tags := []string{}
-	for _, i := range d.Get("tags").([]interface{}) {
-		tags = append(tags, i.(string))
-	}
-	return tags
-}
-
-func updateResourceData(d *schema.ResourceData, info *compute.StorageVolumeInfo) error {
-	d.Set("name", info.Name)
-	d.Set("description", info.Description)
-	d.Set("storage", info.Properties[0])
-	d.Set("sizeInBytes", info.Size)
-	d.Set("tags", info.Tags)
-	d.Set("bootableImage", info.ImageList)
-	d.Set("bootableImageVersion", info.ImageListEntry)
-	if info.Snapshot != "" {
-		d.Set("snapshot", []interface{}{map[string]interface{}{
-			"name":    info.Snapshot,
-			"account": info.SnapshotAccount,
-		}})
-	}
-	d.Set("snapshotId", info.SnapshotID)
-
-	return nil
-}
-
-func resourceStorageVolumeRead(d *schema.ResourceData, meta interface{}) error {
-	sv := meta.(*OPCClient).StorageVolumes()
-	name := d.Get("name").(string)
-
-	log.Printf("[DEBUG] Reading state of storage volume %s", name)
-	result, err := sv.GetStorageVolume(name)
-	if err != nil {
-		// Volume doesn't exist
-		if compute.WasNotFoundError(err) {
-			d.SetId("")
-			return nil
-		}
-		return fmt.Errorf("Error reading storage volume %s: %s", name, err)
-	}
-
-	if len(result.Result) == 0 {
-		// Volume doesn't exist
-		d.SetId("")
-		return nil
-	}
-
-	log.Printf("[DEBUG] Read state of storage volume %s: %#v", name, &result.Result[0])
-	updateResourceData(d, &result.Result[0])
-
-	return nil
-}
-
-func resourceStorageVolumeUpdate(d *schema.ResourceData, meta interface{}) error {
-	sv := meta.(*OPCClient).StorageVolumes()
-	name := d.Get("name").(string)
-	description := d.Get("description").(string)
-	size := d.Get("size").(string)
-	tags := getTags(d)
-
-	log.Printf("[DEBUG] Updating storage volume %s with size %s, description %s, tags %#v", name, size, description, tags)
-	err := sv.UpdateStorageVolume(name, size, description, tags)
-
-	if err != nil {
-		return fmt.Errorf("Error updating storage volume %s: %s", name, err)
-	}
-
-	log.Printf("[DEBUG] Waiting for updated storage volume %s to come online", name)
-	info, err := sv.WaitForStorageVolumeOnline(name, meta.(*OPCClient).MaxRetryTimeout)
-	if err != nil {
-		return fmt.Errorf("Error waiting for updated storage volume %s to come online: %s", name, err)
-	}
-
-	log.Printf("[DEBUG] Updated storage volume %s: %#v", name, info)
-	updateResourceData(d, info)
-	return nil
-}
-
-func resourceStorageVolumeDelete(d *schema.ResourceData, meta interface{}) error {
-	sv := meta.(*OPCClient).StorageVolumes()
-	name := d.Get("name").(string)
-
-	sva := meta.(*OPCClient).StorageAttachments()
-	attachments, err := sva.GetStorageAttachmentsForVolume(name)
-	if err != nil {
-		return fmt.Errorf("Error retrieving storage attachments for volume %s: %s", name, err)
-	}
-
-	attachmentsToCache := make([]storageAttachment, len(*attachments))
-	for index, attachment := range *attachments {
-		log.Printf("[DEBUG] Deleting storage attachment %s for volume %s", attachment.Name, name)
-		sva.DeleteStorageAttachment(attachment.Name)
-		sva.WaitForStorageAttachmentDeleted(attachment.Name, meta.(*OPCClient).MaxRetryTimeout)
-		
attachmentsToCache[index] = storageAttachment{ - index: attachment.Index, - instanceName: compute.InstanceNameFromString(attachment.InstanceName), - } - } - meta.(*OPCClient).storageAttachmentsByVolumeCache[name] = attachmentsToCache - - log.Printf("[DEBUG] Deleting storage volume %s", name) - err = sv.DeleteStorageVolume(name) - if err != nil { - return fmt.Errorf("Error deleting storage volume %s: %s", name, err) - } - - log.Printf("[DEBUG] Waiting for storage volume %s to finish deleting", name) - err = sv.WaitForStorageVolumeDeleted(name, meta.(*OPCClient).MaxRetryTimeout) - if err != nil { - return fmt.Errorf("Error waiting for storage volume %s to finish deleting: %s", name, err) - } - - log.Printf("[DEBUG] Deleted storage volume %s", name) - return nil -} diff --git a/builtin/providers/oracleopc/resource_storage_volume_test.go b/builtin/providers/oracleopc/resource_storage_volume_test.go deleted file mode 100644 index d168b5309..000000000 --- a/builtin/providers/oracleopc/resource_storage_volume_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/resource" - "testing" -) - -func TestAccOPCStorageVolume_Basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: opcResourceCheck( - "opc_compute_storage_volume.test_volume", - testAccCheckStorageVolumeDestroyed), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccStorageVolumeBasic, - Check: resource.ComposeTestCheckFunc( - opcResourceCheck( - "opc_compute_storage_volume.test_volume", - testAccCheckStorageVolumeExists), - ), - }, - }, - }) -} - -func testAccCheckStorageVolumeExists(state *OPCResourceState) error { - sv := state.StorageVolumes() - volumeName := state.Attributes["name"] - - info, err := sv.GetStorageVolume(volumeName) - if err != nil { - return fmt.Errorf("Error retrieving state of volume %s: %s", volumeName, err) - } - - if len(info.Result) == 0 { - return fmt.Errorf("No info found for volume %s", volumeName) - } - - return nil -} - -func testAccCheckStorageVolumeDestroyed(state *OPCResourceState) error { - sv := state.StorageVolumes() - - volumeName := state.Attributes["name"] - - info, err := sv.GetStorageVolume(volumeName) - if err != nil { - return fmt.Errorf("Error retrieving state of volume %s: %s", volumeName, err) - } - - if len(info.Result) != 0 { - return fmt.Errorf("Volume %s still exists", volumeName) - } - - return nil -} - -const testAccStorageVolumeBasic = ` -resource "opc_compute_storage_volume" "test_volume" { - size = "3g" - description = "My volume" - name = "test_volume_b" - tags = ["foo", "bar", "baz"] -} -` diff --git a/command/internal_plugin_list.go b/command/internal_plugin_list.go index 2f48908c7..0d6f631b8 100644 --- a/command/internal_plugin_list.go +++ b/command/internal_plugin_list.go @@ -46,6 +46,7 @@ import ( nomadprovider "github.com/hashicorp/terraform/builtin/providers/nomad" ns1provider "github.com/hashicorp/terraform/builtin/providers/ns1" nullprovider "github.com/hashicorp/terraform/builtin/providers/null" + opcprovider "github.com/hashicorp/terraform/builtin/providers/opc" openstackprovider "github.com/hashicorp/terraform/builtin/providers/openstack" opsgenieprovider "github.com/hashicorp/terraform/builtin/providers/opsgenie" packetprovider "github.com/hashicorp/terraform/builtin/providers/packet" @@ -122,6 +123,7 @@ var InternalProviders = map[string]plugin.ProviderFunc{ "nomad": nomadprovider.Provider, "ns1": 
ns1provider.Provider, "null": nullprovider.Provider, + "opc": opcprovider.Provider, "openstack": openstackprovider.Provider, "opsgenie": opsgenieprovider.Provider, "packet": packetprovider.Provider, diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/LICENSE b/vendor/github.com/hashicorp/go-oracle-terraform/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. 
+ +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/acl.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/acl.go new file mode 100644 index 000000000..5543c4046 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/acl.go @@ -0,0 +1,138 @@ +package compute + +// ACLsClient is a client for the ACLs functions of the Compute API. 
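+//
+// A minimal usage sketch (names and values here are illustrative placeholders,
+// not part of the vendored API; assumes a *Client built elsewhere):
+//
+//	acls := client.ACLs()
+//	info, err := acls.CreateACL(&CreateACLInput{
+//		Name:        "example-acl",
+//		Description: "example ACL",
+//		Enabled:     true,
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = info.URI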
+type ACLsClient struct {
+	ResourceClient
+}
+
+const (
+	ACLDescription   = "acl"
+	ACLContainerPath = "/network/v1/acl/"
+	ACLResourcePath  = "/network/v1/acl"
+)
+
+// ACLs obtains an ACLsClient which can be used to access the
+// ACLs functions of the Compute API
+func (c *Client) ACLs() *ACLsClient {
+	return &ACLsClient{
+		ResourceClient: ResourceClient{
+			Client:              c,
+			ResourceDescription: ACLDescription,
+			ContainerPath:       ACLContainerPath,
+			ResourceRootPath:    ACLResourcePath,
+		}}
+}
+
+// ACLInfo describes an existing ACL.
+type ACLInfo struct {
+	// Description of the ACL
+	Description string `json:"description"`
+	// Indicates whether the ACL is enabled
+	Enabled bool `json:"enabledFlag"`
+	// The name of the ACL
+	Name string `json:"name"`
+	// Tags associated with the ACL
+	Tags []string `json:"tags"`
+	// Uniform Resource Identifier for the ACL
+	URI string `json:"uri"`
+}
+
+// CreateACLInput defines an ACL to be created.
+type CreateACLInput struct {
+	// Description of the ACL
+	// Optional
+	Description string `json:"description"`
+
+	// Enables or disables the ACL. Set to true by default.
+	// Set this to false to disable the ACL.
+	// Optional
+	Enabled bool `json:"enabledFlag"`
+
+	// The name of the ACL to create. Object names can only contain alphanumeric,
+	// underscore, dash, and period characters. Names are case-sensitive.
+	// Required
+	Name string `json:"name"`
+
+	// Strings that you can use to tag the ACL.
+	// Optional
+	Tags []string `json:"tags"`
+}
+
+// CreateACL creates a new ACL.
+func (c *ACLsClient) CreateACL(createInput *CreateACLInput) (*ACLInfo, error) {
+	createInput.Name = c.getQualifiedName(createInput.Name)
+
+	var aclInfo ACLInfo
+	if err := c.createResource(createInput, &aclInfo); err != nil {
+		return nil, err
+	}
+
+	return c.success(&aclInfo)
+}
+
+// GetACLInput describes the ACL to get
+type GetACLInput struct {
+	// The name of the ACL to query for
+	// Required
+	Name string `json:"name"`
+}
+
+// GetACL retrieves the ACL with the given name.
+func (c *ACLsClient) GetACL(getInput *GetACLInput) (*ACLInfo, error) {
+	var aclInfo ACLInfo
+	if err := c.getResource(getInput.Name, &aclInfo); err != nil {
+		return nil, err
+	}
+
+	return c.success(&aclInfo)
+}
+
+// UpdateACLInput describes an ACL to update
+type UpdateACLInput struct {
+	// Description of the ACL
+	// Optional
+	Description string `json:"description"`
+
+	// Enables or disables the ACL. Set to true by default.
+	// Set this to false to disable the ACL.
+	// Optional
+	Enabled bool `json:"enabledFlag"`
+
+	// The name of the ACL to update. Object names can only contain alphanumeric,
+	// underscore, dash, and period characters. Names are case-sensitive.
+	// Required
+	Name string `json:"name"`
+
+	// Strings that you can use to tag the ACL.
+	// Optional
+	Tags []string `json:"tags"`
+}
+
+// UpdateACL modifies the properties of the ACL with the given name.
+func (c *ACLsClient) UpdateACL(updateInput *UpdateACLInput) (*ACLInfo, error) {
+	updateInput.Name = c.getQualifiedName(updateInput.Name)
+
+	var aclInfo ACLInfo
+	if err := c.updateResource(updateInput.Name, updateInput, &aclInfo); err != nil {
+		return nil, err
+	}
+
+	return c.success(&aclInfo)
+}
+
+// DeleteACLInput describes the ACL to delete
+type DeleteACLInput struct {
+	// The name of the ACL to delete.
+	// Required
+	Name string `json:"name"`
+}
+
+// DeleteACL deletes the ACL with the given name.
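+//
+// A hedged lookup-then-delete sketch (assumes an ACL named "example-acl"
+// already exists; the name is a placeholder):
+//
+//	acls := client.ACLs()
+//	if _, err := acls.GetACL(&GetACLInput{Name: "example-acl"}); err == nil {
+//		_ = acls.DeleteACL(&DeleteACLInput{Name: "example-acl"})
+//	}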
+func (c *ACLsClient) DeleteACL(deleteInput *DeleteACLInput) error { + return c.deleteResource(deleteInput.Name) +} + +func (c *ACLsClient) success(aclInfo *ACLInfo) (*ACLInfo, error) { + aclInfo.Name = c.getUnqualifiedName(aclInfo.Name) + return aclInfo, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/authentication.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/authentication.go new file mode 100644 index 000000000..d1b355087 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/authentication.go @@ -0,0 +1,34 @@ +package compute + +import ( + "fmt" + "time" +) + +// AuthenticationReq represents the body of an authentication request. +type AuthenticationReq struct { + User string `json:"user"` + Password string `json:"password"` +} + +// Get a new auth cookie for the compute client +func (c *Client) getAuthenticationCookie() error { + req := AuthenticationReq{ + User: c.getUserName(), + Password: *c.password, + } + + rsp, err := c.executeRequest("POST", "/authenticate/", req) + if err != nil { + return err + } + + if len(rsp.Cookies()) == 0 { + return fmt.Errorf("No authentication cookie found in response %#v", rsp) + } + + c.debugLogString("Successfully authenticated to OPC") + c.authCookie = rsp.Cookies()[0] + c.cookieIssued = time.Now() + return nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/client.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/client.go new file mode 100644 index 000000000..015b87acd --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/client.go @@ -0,0 +1,238 @@ +package compute + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "regexp" + "strings" + "time" + + "github.com/hashicorp/go-oracle-terraform/opc" +) + +const CMP_USERNAME = "/Compute-%s/%s" +const CMP_QUALIFIED_NAME = "%s/%s" + +// Client represents an authenticated compute client, with compute credentials and an api client. 
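+//
+// A construction sketch (endpoint and credential values are placeholder
+// assumptions for illustration; the opc.Config fields shown are the ones this
+// constructor reads):
+//
+//	domain, user, pass := "mydomain", "user@example.com", "secret"
+//	endpoint, _ := url.Parse("https://api-z999.compute.us0.oraclecloud.com/")
+//	client, err := NewComputeClient(&opc.Config{
+//		IdentityDomain: &domain,
+//		Username:       &user,
+//		Password:       &pass,
+//		APIEndpoint:    endpoint,
+//		HTTPClient:     http.DefaultClient,
+//	})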
+type Client struct { + identityDomain *string + userName *string + password *string + apiEndpoint *url.URL + httpClient *http.Client + authCookie *http.Cookie + cookieIssued time.Time + logger opc.Logger + loglevel opc.LogLevelType +} + +func NewComputeClient(c *opc.Config) (*Client, error) { + // First create a client + client := &Client{ + identityDomain: c.IdentityDomain, + userName: c.Username, + password: c.Password, + apiEndpoint: c.APIEndpoint, + httpClient: c.HTTPClient, + loglevel: c.LogLevel, + } + + // Setup logger; defaults to stdout + if c.Logger == nil { + client.logger = opc.NewDefaultLogger() + } + + // If LogLevel was not set to something different, + // double check for env var + if c.LogLevel == 0 { + client.loglevel = opc.LogLevel() + } + + if err := client.getAuthenticationCookie(); err != nil { + return nil, err + } + + return client, nil +} + +func (c *Client) executeRequest(method, path string, body interface{}) (*http.Response, error) { + // Parse URL Path + urlPath, err := url.Parse(path) + if err != nil { + return nil, err + } + + // Marshall request body + var requestBody io.ReadSeeker + var marshaled []byte + if body != nil { + marshaled, err = json.Marshal(body) + if err != nil { + return nil, err + } + requestBody = bytes.NewReader(marshaled) + } + + // Create request + req, err := http.NewRequest(method, c.formatURL(urlPath), requestBody) + if err != nil { + return nil, err + } + + debugReqString := fmt.Sprintf("HTTP %s Req (%s)", method, path) + if body != nil { + req.Header.Set("Content-Type", "application/oracle-compute-v3+json") + // Don't leak creds in STDERR + if path != "/authenticate/" { + debugReqString = fmt.Sprintf("%s:\n %s", debugReqString, string(marshaled)) + } + } + + // Log the request before the authentication cookie, so as not to leak credentials + c.debugLogString(debugReqString) + + // If we have an authentication cookie, let's authenticate, refreshing cookie if need be + if c.authCookie != nil { + if time.Since(c.cookieIssued).Minutes() > 25 { + if err := c.getAuthenticationCookie(); err != nil { + return nil, err + } + } + req.AddCookie(c.authCookie) + } + + // Execute request with supplied client + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices { + return resp, nil + } + + oracleErr := &opc.OracleError{ + StatusCode: resp.StatusCode, + } + + // Even though the returned body will be in json form, it's undocumented what + // fields are actually returned. Once we get documentation of the actual + // error fields that are possible to be returned we can have stricter error types. + if resp.Body != nil { + buf := new(bytes.Buffer) + buf.ReadFrom(resp.Body) + oracleErr.Message = buf.String() + } + + return nil, oracleErr +} + +func (c *Client) formatURL(path *url.URL) string { + return c.apiEndpoint.ResolveReference(path).String() +} + +func (c *Client) getUserName() string { + return fmt.Sprintf(CMP_USERNAME, *c.identityDomain, *c.userName) +} + +// From compute_client +// GetObjectName returns the fully-qualified name of an OPC object, e.g. 
/identity-domain/user@email/{name} +func (c *Client) getQualifiedName(name string) string { + if name == "" { + return "" + } + if strings.HasPrefix(name, "/oracle") || strings.HasPrefix(name, "/Compute-") { + return name + } + return fmt.Sprintf(CMP_QUALIFIED_NAME, c.getUserName(), name) +} + +func (c *Client) getObjectPath(root, name string) string { + return fmt.Sprintf("%s%s", root, c.getQualifiedName(name)) +} + +// GetUnqualifiedName returns the unqualified name of an OPC object, e.g. the {name} part of /identity-domain/user@email/{name} +func (c *Client) getUnqualifiedName(name string) string { + if name == "" { + return name + } + if strings.HasPrefix(name, "/oracle") { + return name + } + if !strings.Contains(name, "/") { + return name + } + + nameParts := strings.Split(name, "/") + return strings.Join(nameParts[3:], "/") +} + +func (c *Client) unqualify(names ...*string) { + for _, name := range names { + *name = c.getUnqualifiedName(*name) + } +} + +func (c *Client) unqualifyUrl(url *string) { + var validID = regexp.MustCompile(`(\/(Compute[^\/\s]+))(\/[^\/\s]+)(\/[^\/\s]+)`) + name := validID.FindString(*url) + *url = c.getUnqualifiedName(name) +} + +func (c *Client) getQualifiedList(list []string) []string { + for i, name := range list { + list[i] = c.getQualifiedName(name) + } + return list +} + +func (c *Client) getUnqualifiedList(list []string) []string { + for i, name := range list { + list[i] = c.getUnqualifiedName(name) + } + return list +} + +func (c *Client) getQualifiedListName(name string) string { + nameParts := strings.Split(name, ":") + listType := nameParts[0] + listName := nameParts[1] + return fmt.Sprintf("%s:%s", listType, c.getQualifiedName(listName)) +} + +func (c *Client) unqualifyListName(qualifiedName string) string { + nameParts := strings.Split(qualifiedName, ":") + listType := nameParts[0] + listName := nameParts[1] + return fmt.Sprintf("%s:%s", listType, c.getUnqualifiedName(listName)) +} + +// Retry function +func (c *Client) waitFor(description string, timeoutSeconds int, test func() (bool, error)) error { + tick := time.Tick(1 * time.Second) + + for i := 0; i < timeoutSeconds; i++ { + select { + case <-tick: + completed, err := test() + c.debugLogString(fmt.Sprintf("Waiting for %s (%d/%ds)", description, i, timeoutSeconds)) + if err != nil || completed { + return err + } + } + } + return fmt.Errorf("Timeout waiting for %s", description) +} + +// Used to determine if the checked resource was found or not. +func WasNotFoundError(e error) bool { + err, ok := e.(*opc.OracleError) + if ok { + return err.StatusCode == 404 + } + return false +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go new file mode 100644 index 000000000..229b75195 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go @@ -0,0 +1,154 @@ +package compute + +const ( + ImageListDescription = "Image List" + ImageListContainerPath = "/imagelist/" + ImageListResourcePath = "/imagelist" +) + +// ImageListClient is a client for the Image List functions of the Compute API. 
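+//
+// A short usage sketch (the list name and description are placeholders):
+//
+//	lists := client.ImageList()
+//	list, err := lists.CreateImageList(&CreateImageListInput{
+//		Name:        "example-list",
+//		Description: "example image list",
+//		Default:     1,
+//	})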
+type ImageListClient struct {
+	ResourceClient
+}
+
+// ImageList obtains an ImageListClient which can be used to access the
+// Image List functions of the Compute API
+func (c *Client) ImageList() *ImageListClient {
+	return &ImageListClient{
+		ResourceClient: ResourceClient{
+			Client:              c,
+			ResourceDescription: ImageListDescription,
+			ContainerPath:       ImageListContainerPath,
+			ResourceRootPath:    ImageListResourcePath,
+		}}
+}
+
+type ImageListEntry struct {
+	// User-defined parameters, in JSON format, that can be passed to an instance of this machine image when it is launched.
+	Attributes map[string]interface{} `json:"attributes"`
+
+	// Name of the Image List.
+	ImageList string `json:"imagelist"`
+
+	// A list of machine images.
+	MachineImages []string `json:"machineimages"`
+
+	// Uniform Resource Identifier.
+	URI string `json:"uri"`
+
+	// Version number of these Machine Images in the Image List.
+	Version int `json:"version"`
+}
+
+// ImageList describes an existing Image List.
+type ImageList struct {
+	// The image list entry to be used, by default, when launching instances using this image list
+	Default int `json:"default"`
+
+	// A description of this image list.
+	Description string `json:"description"`
+
+	// Each machine image in an image list is identified by an image list entry.
+	Entries []ImageListEntry `json:"entries"`
+
+	// The name of the Image List
+	Name string `json:"name"`
+
+	// Uniform Resource Identifier
+	URI string `json:"uri"`
+}
+
+// CreateImageListInput defines an Image List to be created.
+type CreateImageListInput struct {
+	// The image list entry to be used, by default, when launching instances using this image list.
+	// If you don't specify this value, it is set to 1.
+	// Optional
+	Default int `json:"default"`
+
+	// A description of this image list.
+	// Required
+	Description string `json:"description"`
+
+	// The name of the Image List
+	// Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive.
+	// Required
+	Name string `json:"name"`
+}
+
+// CreateImageList creates a new Image List with the given name, description, and default entry.
+func (c *ImageListClient) CreateImageList(createInput *CreateImageListInput) (*ImageList, error) {
+	var imageList ImageList
+	createInput.Name = c.getQualifiedName(createInput.Name)
+	if err := c.createResource(&createInput, &imageList); err != nil {
+		return nil, err
+	}
+
+	return c.success(&imageList)
+}
+
+// DeleteImageListInput describes the image list to delete
+type DeleteImageListInput struct {
+	// The name of the Image List
+	Name string `json:"name"`
+}
+
+// DeleteImageList deletes the Image List with the given name.
+func (c *ImageListClient) DeleteImageList(deleteInput *DeleteImageListInput) error {
+	deleteInput.Name = c.getQualifiedName(deleteInput.Name)
+	return c.deleteResource(deleteInput.Name)
+}
+
+// GetImageListInput describes the image list to get
+type GetImageListInput struct {
+	// The name of the Image List
+	Name string `json:"name"`
+}
+
+// GetImageList retrieves the Image List with the given name.
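+//
+// A lookup sketch (the list name is a placeholder):
+//
+//	list, err := client.ImageList().GetImageList(&GetImageListInput{Name: "example-list"})
+//	if err == nil {
+//		_ = len(list.Entries)
+//	}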
+func (c *ImageListClient) GetImageList(getInput *GetImageListInput) (*ImageList, error) {
+	getInput.Name = c.getQualifiedName(getInput.Name)
+
+	var imageList ImageList
+	if err := c.getResource(getInput.Name, &imageList); err != nil {
+		return nil, err
+	}
+
+	return c.success(&imageList)
+}
+
+// UpdateImageListInput defines an Image List to be updated
+type UpdateImageListInput struct {
+	// The image list entry to be used, by default, when launching instances using this image list.
+	// If you don't specify this value, it is set to 1.
+	// Optional
+	Default int `json:"default"`
+
+	// A description of this image list.
+	// Required
+	Description string `json:"description"`
+
+	// The name of the Image List
+	// Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive.
+	// Required
+	Name string `json:"name"`
+}
+
+// UpdateImageList updates the description and default entry of the Image List with the given name.
+func (c *ImageListClient) UpdateImageList(updateInput *UpdateImageListInput) (*ImageList, error) {
+	var imageList ImageList
+	updateInput.Name = c.getQualifiedName(updateInput.Name)
+	if err := c.updateResource(updateInput.Name, updateInput, &imageList); err != nil {
+		return nil, err
+	}
+	return c.success(&imageList)
+}
+
+func (c *ImageListClient) success(imageList *ImageList) (*ImageList, error) {
+	c.unqualify(&imageList.Name)
+
+	for i := range imageList.Entries {
+		imageList.Entries[i].MachineImages = c.getUnqualifiedList(imageList.Entries[i].MachineImages)
+	}
+
+	return imageList, nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list_entries.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list_entries.go
new file mode 100644
index 000000000..bcc45b8f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list_entries.go
@@ -0,0 +1,122 @@
+package compute
+
+import "fmt"
+
+const (
+	ImageListEntryDescription   = "image list entry"
+	ImageListEntryContainerPath = "/imagelist"
+	ImageListEntryResourcePath  = "/imagelist"
+)
+
+type ImageListEntriesClient struct {
+	ResourceClient
+}
+
+// ImageListEntries() returns an ImageListEntriesClient that can be used to access the
+// necessary CRUD functions for Image List Entries.
+func (c *Client) ImageListEntries() *ImageListEntriesClient {
+	return &ImageListEntriesClient{
+		ResourceClient: ResourceClient{
+			Client:              c,
+			ResourceDescription: ImageListEntryDescription,
+			ContainerPath:       ImageListEntryContainerPath,
+			ResourceRootPath:    ImageListEntryResourcePath,
+		},
+	}
+}
+
+// ImageListEntryInfo contains the exported fields necessary to hold all the information about an
+// Image List Entry
+type ImageListEntryInfo struct {
+	// User-defined parameters, in JSON format, that can be passed to an instance of this machine
+	// image when it is launched. This field can be used, for example, to specify the location of
+	// a database server and login details. Instance metadata, including user-defined data is available
+	// at http://192.0.0.192/ within an instance. See Retrieving User-Defined Instance Attributes in Using
+	// Oracle Compute Cloud Service (IaaS).
+	Attributes map[string]interface{} `json:"attributes"`
+	// Name of the imagelist.
+	Name string `json:"imagelist"`
+	// A list of machine images.
+	MachineImages []string `json:"machineimages"`
+	// Uniform Resource Identifier for the Image List Entry
+	Uri string `json:"uri"`
+	// Version number of these machineImages in the imagelist.
+	Version int `json:"version"`
+}
+
+type CreateImageListEntryInput struct {
+	// The name of the Image List
+	Name string
+	// User-defined parameters, in JSON format, that can be passed to an instance of this machine
+	// image when it is launched. This field can be used, for example, to specify the location of
+	// a database server and login details. Instance metadata, including user-defined data is
+	// available at http://192.0.0.192/ within an instance. See Retrieving User-Defined Instance
+	// Attributes in Using Oracle Compute Cloud Service (IaaS).
+	// Optional
+	Attributes map[string]interface{} `json:"attributes"`
+	// A list of machine images.
+	// Required
+	MachineImages []string `json:"machineimages"`
+	// The unique version of the entry in the image list.
+	// Required
+	Version int `json:"version"`
+}
+
+// Create a new Image List Entry from an ImageListEntriesClient and an input struct.
+// Returns a populated Info struct for the Image List Entry, and any errors
+func (c *ImageListEntriesClient) CreateImageListEntry(input *CreateImageListEntryInput) (*ImageListEntryInfo, error) {
+	c.updateClientPaths(input.Name, -1)
+	var imageListEntryInfo ImageListEntryInfo
+	if err := c.createResource(&input, &imageListEntryInfo); err != nil {
+		return nil, err
+	}
+	return c.success(&imageListEntryInfo)
+}
+
+type GetImageListEntryInput struct {
+	// The name of the Image List
+	Name string
+	// Version number of these machineImages in the imagelist.
+	Version int
+}
+
+// Returns a populated ImageListEntryInfo struct from an input struct
+func (c *ImageListEntriesClient) GetImageListEntry(input *GetImageListEntryInput) (*ImageListEntryInfo, error) {
+	c.updateClientPaths(input.Name, input.Version)
+	var imageListEntryInfo ImageListEntryInfo
+	if err := c.getResource("", &imageListEntryInfo); err != nil {
+		return nil, err
+	}
+	return c.success(&imageListEntryInfo)
+}
+
+type DeleteImageListEntryInput struct {
+	// The name of the Image List
+	Name string
+	// Version number of these machineImages in the imagelist.
+	Version int
+}
+
+func (c *ImageListEntriesClient) DeleteImageListEntry(input *DeleteImageListEntryInput) error {
+	c.updateClientPaths(input.Name, input.Version)
+	return c.deleteResource("")
+}
+
+func (c *ImageListEntriesClient) updateClientPaths(name string, version int) {
+	var containerPath, resourcePath string
+	name = c.getQualifiedName(name)
+	containerPath = ImageListEntryContainerPath + name + "/entry/"
+	resourcePath = ImageListEntryContainerPath + name + "/entry"
+	if version != -1 {
+		containerPath = fmt.Sprintf("%s%d", containerPath, version)
+		resourcePath = fmt.Sprintf("%s/%d", resourcePath, version)
+	}
+	c.ContainerPath = containerPath
+	c.ResourceRootPath = resourcePath
+}
+
+// Unqualifies any qualified fields in the ImageListEntryInfo struct
+func (c *ImageListEntriesClient) success(info *ImageListEntryInfo) (*ImageListEntryInfo, error) {
+	c.unqualifyUrl(&info.Uri)
+	return info, nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go
new file mode 100644
index 000000000..b697b36e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go
@@ -0,0 +1,540 @@
+package compute
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+const WaitForInstanceReadyTimeout = 600
+const WaitForInstanceDeleteTimeout = 600
+
+// InstancesClient is a client for the Instance functions of the Compute API.
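+//
+// A hedged launch sketch (the shape and image list values are placeholders):
+//
+//	info, err := client.Instances().CreateInstance(&CreateInstanceInput{
+//		Name:      "example-instance",
+//		Label:     "example",
+//		Shape:     "oc3",
+//		ImageList: "/oracle/public/oel_6.7_apaas_16.4.5_1610211300",
+//	})
+//	// Keep both info.Name and info.ID; later Get/Delete calls require both.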
+type InstancesClient struct {
+	ResourceClient
+}
+
+// Instances obtains an InstancesClient which can be used to access the
+// Instance functions of the Compute API
+func (c *Client) Instances() *InstancesClient {
+	return &InstancesClient{
+		ResourceClient: ResourceClient{
+			Client:              c,
+			ResourceDescription: "instance",
+			ContainerPath:       "/launchplan/",
+			ResourceRootPath:    "/instance",
+		}}
+}
+
+type InstanceState string
+
+const (
+	InstanceRunning      InstanceState = "running"
+	InstanceInitializing InstanceState = "initializing"
+	InstancePreparing    InstanceState = "preparing"
+	InstanceStopping     InstanceState = "stopping"
+	InstanceQueued       InstanceState = "queued"
+	InstanceError        InstanceState = "error"
+)
+
+// InstanceInfo represents the Compute API's view of the state of an instance.
+type InstanceInfo struct {
+	// The ID for the instance. Set by the SDK based on the request - not the API.
+	ID string
+
+	// A dictionary of attributes to be made available to the instance.
+	// A value with the key "userdata" will be made available in an EC2-compatible manner.
+	Attributes map[string]interface{} `json:"attributes"`
+
+	// The availability domain for the instance
+	AvailabilityDomain string `json:"availability_domain"`
+
+	// Boot order list.
+	BootOrder []int `json:"boot_order"`
+
+	// The default domain to use for the hostname and DNS lookups
+	Domain string `json:"domain"`
+
+	// Optional ImageListEntry number. Default will be used if not specified
+	Entry int `json:"entry"`
+
+	// The reason for the instance going to error state, if available.
+	ErrorReason string `json:"error_reason"`
+
+	// SSH Server Fingerprint presented by the instance
+	Fingerprint string `json:"fingerprint"`
+
+	// The hostname for the instance
+	Hostname string `json:"hostname"`
+
+	// The format of the image
+	ImageFormat string `json:"image_format"`
+
+	// Name of imagelist to be launched.
+	ImageList string `json:"imagelist"`
+
+	// IP address of the instance.
+	IPAddress string `json:"ip"`
+
+	// A label assigned by the user, specifically for defining inter-instance relationships.
+	Label string `json:"label"`
+
+	// Name of this instance, generated by the server.
+	Name string `json:"name"`
+
+	// Mapping of interface names to network specifiers for virtual NICs to be attached to this instance.
+	Networking map[string]NetworkingInfo `json:"networking"`
+
+	// A list of strings specifying arbitrary tags on nodes to be matched on placement.
+	PlacementRequirements []string `json:"placement_requirements"`
+
+	// The OS platform for the instance.
+	Platform string `json:"platform"`
+
+	// The priority at which this instance will be run
+	Priority string `json:"priority"`
+
+	// Reference to the QuotaReservation, to be destroyed with the instance
+	QuotaReservation string `json:"quota_reservation"`
+
+	// Array of relationship specifications to be satisfied on this instance's placement
+	Relationships []string `json:"relationships"`
+
+	// Resolvers to use instead of the default resolvers
+	Resolvers []string `json:"resolvers"`
+
+	// Add PTR records for the hostname
+	ReverseDNS bool `json:"reverse_dns"`
+
+	// Type of instance, as defined on site configuration.
+	Shape string `json:"shape"`
+
+	// Site to run on
+	Site string `json:"site"`
+
+	// IDs of SSH keys that will be exposed to the instance.
+	SSHKeys []string `json:"sshkeys"`
+
+	// The start time of the instance
+	StartTime string `json:"start_time"`
+
+	// State of the instance.
+	State InstanceState `json:"state"`
+
+	// The Storage Attachment information.
+ Storage []StorageAttachment `json:"storage_attachments"` + + // Array of tags associated with the instance. + Tags []string `json:"tags"` + + // vCable for this instance. + VCableID string `json:"vcable_id"` + + // Specify if the devices created for the instance are virtio devices. If not specified, the default + // will come from the cluster configuration file + Virtio bool `json:"virtio,omitempty"` + + // IP Address and port of the VNC console for the instance + VNC string `json:"vnc"` +} + +type StorageAttachment struct { + // The index number for the volume. + Index int `json:"index"` + + // The three-part name (/Compute-identity_domain/user/object) of the storage attachment. + Name string `json:"name"` + + // The three-part name (/Compute-identity_domain/user/object) of the storage volume attached to the instance. + StorageVolumeName string `json:"storage_volume_name"` +} + +func (i *InstanceInfo) getInstanceName() string { + return fmt.Sprintf(CMP_QUALIFIED_NAME, i.Name, i.ID) +} + +type CreateInstanceInput struct { + // A dictionary of user-defined attributes to be made available to the instance. + // Optional + Attributes map[string]interface{} `json:"attributes"` + // Boot order list + // Optional + BootOrder []int `json:"boot_order"` + // The host name assigned to the instance. On an Oracle Linux instance, + // this host name is displayed in response to the hostname command. + // Only relative DNS is supported. The domain name is suffixed to the host name + // that you specify. The host name must not end with a period. If you don't specify a + // host name, then a name is generated automatically. + // Optional + Hostname string `json:"hostname"` + // Name of imagelist to be launched. + // Optional + ImageList string `json:"imagelist"` + // A label assigned by the user, specifically for defining inter-instance relationships. + // Optional + Label string `json:"label"` + // Name of this instance, generated by the server. + // Optional + Name string `json:"name"` + // Networking information. + // Optional + Networking map[string]NetworkingInfo `json:"networking"` + // If set to true (default), then reverse DNS records are created. + // If set to false, no reverse DNS records are created. + // Optional + ReverseDNS bool `json:"reverse_dns,omitempty"` + // Type of instance, as defined on site configuration. + // Required + Shape string `json:"shape"` + // A list of the Storage Attachments you want to associate with the instance. + // Optional + Storage []StorageAttachmentInput `json:"storage_attachments"` + // A list of the SSH public keys that you want to associate with the instance. + // Optional + SSHKeys []string `json:"sshkeys"` + // A list of tags to be supplied to the instance + // Optional + Tags []string `json:"tags"` +} + +type StorageAttachmentInput struct { + // The index number for the volume. The allowed range is 1 to 10. + // If you want to use a storage volume as the boot disk for an instance, you must specify the index number for that volume as 1. + // The index determines the device name by which the volume is exposed to the instance. + Index int `json:"index"` + // The three-part name (/Compute-identity_domain/user/object) of the storage volume that you want to attach to the instance. + // Note that volumes attached to an instance at launch time can't be detached. 
+ Volume string `json:"volume"` +} + +const ReservationPrefix = "ipreservation" +const ReservationIPPrefix = "network/v1/ipreservation" + +type NICModel string + +const ( + NICDefaultModel NICModel = "e1000" +) + +// Struct of Networking info from a populated instance, or to be used as input to create an instance +type NetworkingInfo struct { + // The DNS name for the Shared network (Required) + // DNS A Record for an IP Network (Optional) + DNS []string `json:"dns,omitempty"` + // IP Network only. + // If you want to associate a static private IP Address, + // specify that here within the range of the supplied IPNetwork attribute. + // Optional + IPAddress string `json:"ip,omitempty"` + // IP Network only. + // The name of the IP Network you want to add the instance to. + // Required + IPNetwork string `json:"ipnetwork,omitempty"` + // IP Network only. + // The hexadecimal MAC Address of the interface + // Optional + MACAddress string `json:"address,omitempty"` + // Shared Network only. + // The type of NIC used. Must be set to 'e1000' + // Required + Model NICModel `json:"model,omitempty"` + // IP Network and Shared Network + // The name servers that are sent through DHCP as option 6. + // You can specify a maximum of eight name server IP addresses per interface. + // Optional + NameServers []string `json:"name_servers,omitempty"` + // The names of an IP Reservation to associate in an IP Network (Optional) + // Indicates whether a temporary or permanent public IP Address should be assigned + // in a Shared Network (Required) + Nat []string `json:"nat,omitempty"` + // IP Network and Shared Network + // The search domains that should be sent through DHCP as option 119. + // You can enter a maximum of eight search domain zones per interface. + // Optional + SearchDomains []string `json:"search_domains,omitempty"` + // Shared Network only. + // The security lists that you want to add the instance to + // Required + SecLists []string `json:"seclists,omitempty"` + // IP Network Only + // The name of the vNIC + // Optional + Vnic string `json:"vnic,omitempty"` + // IP Network only. + // The names of the vNICSets you want to add the interface to. + // Optional + VnicSets []string `json:"vnicsets,omitempty"` +} + +// LaunchPlan defines a launch plan, used to launch instances with the supplied InstanceSpec(s) +type LaunchPlanInput struct { + // Describes an array of instances which should be launched + Instances []CreateInstanceInput `json:"instances"` +} + +type LaunchPlanResponse struct { + // An array of instances which have been launched + Instances []InstanceInfo `json:"instances"` +} + +// LaunchInstance creates and submits a LaunchPlan to launch a new instance. 
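+// The method is exposed as CreateInstance; it blocks until the instance
+// reaches the running state or WaitForInstanceReadyTimeout (600 seconds)
+// elapses, so the returned InstanceInfo describes a ready instance.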
+func (c *InstancesClient) CreateInstance(input *CreateInstanceInput) (*InstanceInfo, error) { + qualifiedSSHKeys := []string{} + for _, key := range input.SSHKeys { + qualifiedSSHKeys = append(qualifiedSSHKeys, c.getQualifiedName(key)) + } + + input.SSHKeys = qualifiedSSHKeys + + qualifiedStorageAttachments := []StorageAttachmentInput{} + for _, attachment := range input.Storage { + qualifiedStorageAttachments = append(qualifiedStorageAttachments, StorageAttachmentInput{ + Index: attachment.Index, + Volume: c.getQualifiedName(attachment.Volume), + }) + } + input.Storage = qualifiedStorageAttachments + + input.Networking = c.qualifyNetworking(input.Networking) + + input.Name = fmt.Sprintf(CMP_QUALIFIED_NAME, c.getUserName(), input.Name) + + plan := LaunchPlanInput{Instances: []CreateInstanceInput{*input}} + + var responseBody LaunchPlanResponse + if err := c.createResource(&plan, &responseBody); err != nil { + return nil, err + } + + if len(responseBody.Instances) == 0 { + return nil, fmt.Errorf("No instance information returned: %#v", responseBody) + } + + // Call wait for instance ready now, as creating the instance is an eventually consistent operation + getInput := &GetInstanceInput{ + Name: input.Name, + ID: responseBody.Instances[0].ID, + } + + // Wait for instance to be ready and return the result + // Don't have to unqualify any objects, as the GetInstance method will handle that + return c.WaitForInstanceRunning(getInput, WaitForInstanceReadyTimeout) +} + +// Both of these fields are required. If they're not provided, things go wrong in +// incredibly amazing ways. +type GetInstanceInput struct { + // The Unqualified Name of this Instance + Name string + // The Unqualified ID of this Instance + ID string +} + +func (g *GetInstanceInput) String() string { + return fmt.Sprintf(CMP_QUALIFIED_NAME, g.Name, g.ID) +} + +// GetInstance retrieves information about an instance. +func (c *InstancesClient) GetInstance(input *GetInstanceInput) (*InstanceInfo, error) { + if input.ID == "" || input.Name == "" { + return nil, errors.New("Both instance name and ID need to be specified") + } + + var responseBody InstanceInfo + if err := c.getResource(input.String(), &responseBody); err != nil { + return nil, err + } + + if responseBody.Name == "" { + return nil, fmt.Errorf("Empty response body when requesting instance %s", input.Name) + } + + // The returned 'Name' attribute is the fully qualified instance name + "/" + ID + // Split these out to accurately populate the fields + nID := strings.Split(c.getUnqualifiedName(responseBody.Name), "/") + responseBody.Name = nID[0] + responseBody.ID = nID[1] + + c.unqualify(&responseBody.VCableID) + + // Unqualify SSH Key names + sshKeyNames := []string{} + for _, sshKeyRef := range responseBody.SSHKeys { + sshKeyNames = append(sshKeyNames, c.getUnqualifiedName(sshKeyRef)) + } + responseBody.SSHKeys = sshKeyNames + + responseBody.Networking = c.unqualifyNetworking(responseBody.Networking) + + return &responseBody, nil +} + +type DeleteInstanceInput struct { + // The Unqualified Name of this Instance + Name string + // The Unqualified ID of this Instance + ID string +} + +func (d *DeleteInstanceInput) String() string { + return fmt.Sprintf(CMP_QUALIFIED_NAME, d.Name, d.ID) +} + +// DeleteInstance deletes an instance. 
+func (c *InstancesClient) DeleteInstance(input *DeleteInstanceInput) error { + // Call to delete the instance + if err := c.deleteResource(input.String()); err != nil { + return err + } + // Wait for instance to be deleted + return c.WaitForInstanceDeleted(input, WaitForInstanceDeleteTimeout) +} + +// WaitForInstanceRunning waits for an instance to be completely initialized and available. +func (c *InstancesClient) WaitForInstanceRunning(input *GetInstanceInput, timeoutSeconds int) (*InstanceInfo, error) { + var info *InstanceInfo + var getErr error + err := c.waitFor("instance to be ready", timeoutSeconds, func() (bool, error) { + info, getErr = c.GetInstance(input) + if getErr != nil { + return false, getErr + } + switch s := info.State; s { + case InstanceError: + return false, fmt.Errorf("Error initializing instance: %s", info.ErrorReason) + case InstanceRunning: + c.debugLogString("Instance Running") + return true, nil + case InstanceQueued: + c.debugLogString("Instance Queuing") + return false, nil + case InstanceInitializing: + c.debugLogString("Instance Initializing") + return false, nil + case InstancePreparing: + c.debugLogString("Instance Preparing") + return false, nil + default: + c.debugLogString(fmt.Sprintf("Unknown instance state: %s, waiting", s)) + return false, nil + } + }) + return info, err +} + +// WaitForInstanceDeleted waits for an instance to be fully deleted. +func (c *InstancesClient) WaitForInstanceDeleted(input *DeleteInstanceInput, timeoutSeconds int) error { + return c.waitFor("instance to be deleted", timeoutSeconds, func() (bool, error) { + var info InstanceInfo + if err := c.getResource(input.String(), &info); err != nil { + if WasNotFoundError(err) { + // Instance could not be found, thus deleted + return true, nil + } + // Some other error occurred trying to get instance, exit + return false, err + } + switch s := info.State; s { + case InstanceError: + return false, fmt.Errorf("Error stopping instance: %s", info.ErrorReason) + case InstanceStopping: + c.debugLogString("Instance stopping") + return false, nil + default: + c.debugLogString(fmt.Sprintf("Unknown instance state: %s, waiting", s)) + return false, nil + } + }) +} + +func (c *InstancesClient) qualifyNetworking(info map[string]NetworkingInfo) map[string]NetworkingInfo { + qualifiedNetworks := map[string]NetworkingInfo{} + for k, v := range info { + qfd := v + sharedNetwork := false + if v.IPNetwork != "" { + // Network interface is for an IP Network + qfd.IPNetwork = c.getQualifiedName(v.IPNetwork) + sharedNetwork = true + } + if v.Vnic != "" { + qfd.Vnic = c.getQualifiedName(v.Vnic) + } + if v.Nat != nil { + qfd.Nat = c.qualifyNat(v.Nat, sharedNetwork) + } + if v.VnicSets != nil { + qfd.VnicSets = c.getQualifiedList(v.VnicSets) + } + if v.SecLists != nil { + // Network interface is for the shared network + secLists := []string{} + for _, v := range v.SecLists { + secLists = append(secLists, c.getQualifiedName(v)) + } + qfd.SecLists = secLists + } + + qualifiedNetworks[k] = qfd + } + return qualifiedNetworks +} + +func (c *InstancesClient) unqualifyNetworking(info map[string]NetworkingInfo) map[string]NetworkingInfo { + // Unqualify ip network + unqualifiedNetworks := map[string]NetworkingInfo{} + for k, v := range info { + unq := v + if v.IPNetwork != "" { + unq.IPNetwork = c.getUnqualifiedName(v.IPNetwork) + } + if v.Vnic != "" { + unq.Vnic = c.getUnqualifiedName(v.Vnic) + } + if v.Nat != nil { + unq.Nat = c.unqualifyNat(v.Nat) + } + if v.VnicSets != nil { + unq.VnicSets = 
c.getUnqualifiedList(v.VnicSets)
+		}
+		if v.SecLists != nil {
+			secLists := []string{}
+			for _, v := range v.SecLists {
+				secLists = append(secLists, c.getUnqualifiedName(v))
+			}
+			unq.SecLists = secLists
+		}
+		unqualifiedNetworks[k] = unq
+	}
+	return unqualifiedNetworks
+}
+
+func (c *InstancesClient) qualifyNat(nat []string, shared bool) []string {
+	qualifiedNats := []string{}
+	for _, v := range nat {
+		if strings.HasPrefix(v, "ippool:/oracle") {
+			qualifiedNats = append(qualifiedNats, v)
+			continue
+		}
+		prefix := ReservationPrefix
+		if shared {
+			prefix = ReservationIPPrefix
+		}
+		qualifiedNats = append(qualifiedNats, fmt.Sprintf("%s:%s", prefix, c.getQualifiedName(v)))
+	}
+	return qualifiedNats
+}
+
+func (c *InstancesClient) unqualifyNat(nat []string) []string {
+	unQualifiedNats := []string{}
+	for _, v := range nat {
+		if strings.HasPrefix(v, "ippool:/oracle") {
+			unQualifiedNats = append(unQualifiedNats, v)
+			continue
+		}
+		n := strings.Split(v, ":")
+		u := n[1]
+		unQualifiedNats = append(unQualifiedNats, c.getUnqualifiedName(u))
+	}
+	return unQualifiedNats
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_prefix_set.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_prefix_set.go
new file mode 100644
index 000000000..f685fa2d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_prefix_set.go
@@ -0,0 +1,135 @@
+package compute
+
+const (
+	IPAddressPrefixSetDescription   = "ip address prefix set"
+	IPAddressPrefixSetContainerPath = "/network/v1/ipaddressprefixset/"
+	IPAddressPrefixSetResourcePath  = "/network/v1/ipaddressprefixset"
+)
+
+type IPAddressPrefixSetsClient struct {
+	ResourceClient
+}
+
+// IPAddressPrefixSets() returns an IPAddressPrefixSetsClient that can be used to access the
+// necessary CRUD functions for IP Address Prefix Sets.
+func (c *Client) IPAddressPrefixSets() *IPAddressPrefixSetsClient {
+	return &IPAddressPrefixSetsClient{
+		ResourceClient: ResourceClient{
+			Client:              c,
+			ResourceDescription: IPAddressPrefixSetDescription,
+			ContainerPath:       IPAddressPrefixSetContainerPath,
+			ResourceRootPath:    IPAddressPrefixSetResourcePath,
+		},
+	}
+}
+
+// IPAddressPrefixSetInfo contains the exported fields necessary to hold all the information about an
+// IP Address Prefix Set
+type IPAddressPrefixSetInfo struct {
+	// The name of the IP Address Prefix Set
+	Name string `json:"name"`
+	// Description of the IP Address Prefix Set
+	Description string `json:"description"`
+	// List of CIDR IPv4 prefixes assigned in the virtual network.
+	IPAddressPrefixes []string `json:"ipAddressPrefixes"`
+	// Slice of tags associated with the IP Address Prefix Set
+	Tags []string `json:"tags"`
+	// Uniform Resource Identifier for the IP Address Prefix Set
+	Uri string `json:"uri"`
+}
+
+type CreateIPAddressPrefixSetInput struct {
+	// The name of the IP Address Prefix Set to create. Object names can only contain alphanumeric,
+	// underscore, dash, and period characters. Names are case-sensitive.
+	// Required
+	Name string `json:"name"`
+
+	// Description of the IPAddressPrefixSet
+	// Optional
+	Description string `json:"description"`
+
+	// List of CIDR IPv4 prefixes assigned in the virtual network.
+	// Optional
+	IPAddressPrefixes []string `json:"ipAddressPrefixes"`
+
+	// String slice of tags to apply to the IP Address Prefix Set object
+	// Optional
+	Tags []string `json:"tags"`
+}
+
+// Create a new IP Address Prefix Set from an IPAddressPrefixSetsClient and an input struct.
+// Returns a populated Info struct for the IP Address Prefix Set, and any errors
+func (c *IPAddressPrefixSetsClient) CreateIPAddressPrefixSet(input *CreateIPAddressPrefixSetInput) (*IPAddressPrefixSetInfo, error) {
+	input.Name = c.getQualifiedName(input.Name)
+
+	var ipInfo IPAddressPrefixSetInfo
+	if err := c.createResource(&input, &ipInfo); err != nil {
+		return nil, err
+	}
+
+	return c.success(&ipInfo)
+}
+
+type GetIPAddressPrefixSetInput struct {
+	// The name of the IP Address Prefix Set to query for. Case-sensitive
+	// Required
+	Name string `json:"name"`
+}
+
+// Returns a populated IPAddressPrefixSetInfo struct from an input struct
+func (c *IPAddressPrefixSetsClient) GetIPAddressPrefixSet(input *GetIPAddressPrefixSetInput) (*IPAddressPrefixSetInfo, error) {
+	input.Name = c.getQualifiedName(input.Name)
+
+	var ipInfo IPAddressPrefixSetInfo
+	if err := c.getResource(input.Name, &ipInfo); err != nil {
+		return nil, err
+	}
+
+	return c.success(&ipInfo)
+}
+
+// UpdateIPAddressPrefixSetInput defines what to update in an IP Address Prefix Set
+type UpdateIPAddressPrefixSetInput struct {
+	// The name of the IP Address Prefix Set to update. Object names can only contain alphanumeric,
+	// underscore, dash, and period characters. Names are case-sensitive.
+	// Required
+	Name string `json:"name"`
+
+	// Description of the IPAddressPrefixSet
+	// Optional
+	Description string `json:"description"`
+
+	// List of CIDR IPv4 prefixes assigned in the virtual network.
+	IPAddressPrefixes []string `json:"ipAddressPrefixes"`
+
+	// String slice of tags to apply to the IP Address Prefix Set object
+	// Optional
+	Tags []string `json:"tags"`
+}
+
+// UpdateIPAddressPrefixSet updates the IP Address Prefix Set with the given name.
+func (c *IPAddressPrefixSetsClient) UpdateIPAddressPrefixSet(updateInput *UpdateIPAddressPrefixSetInput) (*IPAddressPrefixSetInfo, error) {
+	updateInput.Name = c.getQualifiedName(updateInput.Name)
+	var ipInfo IPAddressPrefixSetInfo
+	if err := c.updateResource(updateInput.Name, updateInput, &ipInfo); err != nil {
+		return nil, err
+	}
+
+	return c.success(&ipInfo)
+}
+
+type DeleteIPAddressPrefixSetInput struct {
+	// The name of the IP Address Prefix Set to delete.
Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *IPAddressPrefixSetsClient) DeleteIPAddressPrefixSet(input *DeleteIPAddressPrefixSetInput) error { + return c.deleteResource(input.Name) +} + +// Unqualifies any qualified fields in the IPAddressPrefixSetInfo struct +func (c *IPAddressPrefixSetsClient) success(info *IPAddressPrefixSetInfo) (*IPAddressPrefixSetInfo, error) { + c.unqualify(&info.Name) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_reservations.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_reservations.go new file mode 100644 index 000000000..67e92d4ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_reservations.go @@ -0,0 +1,190 @@ +package compute + +import ( + "fmt" + "path/filepath" +) + +// IPAddressReservationsClient is a client to manage ip address reservation resources +type IPAddressReservationsClient struct { + *ResourceClient +} + +const ( + IPAddressReservationDescription = "IP Address Reservation" + IPAddressReservationContainerPath = "/network/v1/ipreservation/" + IPAddressReservationResourcePath = "/network/v1/ipreservation" + IPAddressReservationQualifier = "/oracle/public" +) + +// IPAddressReservations returns an IPAddressReservationsClient to manage IP address reservation +// resources +func (c *Client) IPAddressReservations() *IPAddressReservationsClient { + return &IPAddressReservationsClient{ + ResourceClient: &ResourceClient{ + Client: c, + ResourceDescription: IPAddressReservationDescription, + ContainerPath: IPAddressReservationContainerPath, + ResourceRootPath: IPAddressReservationResourcePath, + }, + } +} + +// IPAddressReservation describes an IP Address reservation +type IPAddressReservation struct { + // Description of the IP Address Reservation + Description string `json:"description"` + + // Reserved NAT IPv4 address from the IP Address Pool + IPAddress string `json:"ipAddress"` + + // Name of the IP Address pool to reserve the NAT IP from + IPAddressPool string `json:"ipAddressPool"` + + // Name of the reservation + Name string `json:"name"` + + // Tags associated with the object + Tags []string `json:"tags"` + + // Uniform Resource Identified for the reservation + Uri string `json:"uri"` +} + +const ( + PublicIPAddressPool = "public-ippool" + PrivateIPAddressPool = "cloud-ippool" +) + +// CreateIPAddressReservationInput defines input parameters to create an ip address reservation +type CreateIPAddressReservationInput struct { + // Description of the IP Address Reservation + // Optional + Description string `json:"description"` + + // IP Address pool from which to reserve an IP Address. 
+ // Can be one of the following: + // + // 'public-ippool' - When you attach an IP Address from this pool to an instance, you enable + // access between the public Internet and the instance + // 'cloud-ippool' - When you attach an IP Address from this pool to an instance, the instance + // can communicate privately with other Oracle Cloud Services + // Optional + IPAddressPool string `json:"ipAddressPool"` + + // The name of the reservation to create + // Required + Name string `json:"name"` + + // Tags to associate with the IP Reservation + // Optional + Tags []string `json:"tags"` +} + +// Takes an input struct, creates an IP Address reservation, and returns the info struct and any errors +func (c *IPAddressReservationsClient) CreateIPAddressReservation(input *CreateIPAddressReservationInput) (*IPAddressReservation, error) { + var ipAddrRes IPAddressReservation + // Qualify supplied name + input.Name = c.getQualifiedName(input.Name) + // Qualify supplied address pool if not nil + if input.IPAddressPool != "" { + input.IPAddressPool = c.qualifyIPAddressPool(input.IPAddressPool) + } + + if err := c.createResource(input, &ipAddrRes); err != nil { + return nil, err + } + + return c.success(&ipAddrRes) +} + +// Parameters to retrieve information on an ip address reservation +type GetIPAddressReservationInput struct { + // Name of the IP Reservation + // Required + Name string `json:"name"` +} + +// Returns an IP Address Reservation and any errors +func (c *IPAddressReservationsClient) GetIPAddressReservation(input *GetIPAddressReservationInput) (*IPAddressReservation, error) { + var ipAddrRes IPAddressReservation + + input.Name = c.getQualifiedName(input.Name) + if err := c.getResource(input.Name, &ipAddrRes); err != nil { + return nil, err + } + + return c.success(&ipAddrRes) +} + +// Parameters to update an IP Address reservation +type UpdateIPAddressReservationInput struct { + // Description of the IP Address Reservation + // Optional + Description string `json:"description"` + + // IP Address pool from which to reserve an IP Address. 
+ // Can be one of the following: + // + // 'public-ippool' - When you attach an IP Address from this pool to an instance, you enable + // access between the public Internet and the instance + // 'cloud-ippool' - When you attach an IP Address from this pool to an instance, the instance + // can communicate privately with other Oracle Cloud Services + // Optional + IPAddressPool string `json:"ipAddressPool"` + + // The name of the reservation to create + // Required + Name string `json:"name"` + + // Tags to associate with the IP Reservation + // Optional + Tags []string `json:"tags"` +} + +func (c *IPAddressReservationsClient) UpdateIPAddressReservation(input *UpdateIPAddressReservationInput) (*IPAddressReservation, error) { + var ipAddrRes IPAddressReservation + + // Qualify supplied name + input.Name = c.getQualifiedName(input.Name) + // Qualify supplied address pool if not nil + if input.IPAddressPool != "" { + input.IPAddressPool = c.qualifyIPAddressPool(input.IPAddressPool) + } + + if err := c.updateResource(input.Name, input, &ipAddrRes); err != nil { + return nil, err + } + + return c.success(&ipAddrRes) +} + +// Parameters to delete an IP Address Reservation +type DeleteIPAddressReservationInput struct { + // The name of the reservation to delete + Name string `json:"name"` +} + +func (c *IPAddressReservationsClient) DeleteIPAddressReservation(input *DeleteIPAddressReservationInput) error { + input.Name = c.getQualifiedName(input.Name) + return c.deleteResource(input.Name) +} + +func (c *IPAddressReservationsClient) success(result *IPAddressReservation) (*IPAddressReservation, error) { + c.unqualify(&result.Name) + if result.IPAddressPool != "" { + result.IPAddressPool = c.unqualifyIPAddressPool(result.IPAddressPool) + } + + return result, nil +} + +func (c *IPAddressReservationsClient) qualifyIPAddressPool(input string) string { + // Add '/oracle/public/' + return fmt.Sprintf("%s/%s", IPAddressReservationQualifier, input) +} + +func (c *IPAddressReservationsClient) unqualifyIPAddressPool(input string) string { + // Remove '/oracle/public/' + return filepath.Base(input) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_associations.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_associations.go new file mode 100644 index 000000000..0bfdcbc9d --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_associations.go @@ -0,0 +1,118 @@ +package compute + +import ( + "fmt" + "strings" +) + +// IPAssociationsClient is a client for the IP Association functions of the Compute API. +type IPAssociationsClient struct { + *ResourceClient +} + +// IPAssociations obtains a IPAssociationsClient which can be used to access to the +// IP Association functions of the Compute API +func (c *Client) IPAssociations() *IPAssociationsClient { + return &IPAssociationsClient{ + ResourceClient: &ResourceClient{ + Client: c, + ResourceDescription: "ip association", + ContainerPath: "/ip/association/", + ResourceRootPath: "/ip/association", + }} +} + +// IPAssociationInfo describes an existing IP association. +type IPAssociationInfo struct { + // TODO: it'd probably make sense to expose the `ip` field here too? + + // The three-part name of the object (/Compute-identity_domain/user/object). + Name string `json:"name"` + + // The three-part name of the IP reservation object in the format (/Compute-identity_domain/user/object). 
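+	// (an illustrative value: /Compute-mydomain/user@example.com/example-reservation)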
+ // An IP reservation is a public IP address which is attached to an Oracle Compute Cloud Service instance that requires access to or from the Internet. + Reservation string `json:"reservation"` + + // The type of IP Address to associate with this instance + // for a Dynamic IP address specify `ippool:/oracle/public/ippool`. + // for a Static IP address specify the three part name of the existing IP reservation + ParentPool string `json:"parentpool"` + + // Uniform Resource Identifier for the IP Association + URI string `json:"uri"` + + // The three-part name of a vcable ID of an instance that is associated with the IP reservation. + VCable string `json:"vcable"` +} + +type CreateIPAssociationInput struct { + // The type of IP Address to associate with this instance + // for a Dynamic IP address specify `ippool:/oracle/public/ippool`. + // for a Static IP address specify the three part name of the existing IP reservation + // Required + ParentPool string `json:"parentpool"` + + // The three-part name of the vcable ID of the instance that you want to associate with an IP address. The three-part name is in the format: /Compute-identity_domain/user/object. + // Required + VCable string `json:"vcable"` +} + +// CreateIPAssociation creates a new IP association with the supplied vcable and parentpool. +func (c *IPAssociationsClient) CreateIPAssociation(input *CreateIPAssociationInput) (*IPAssociationInfo, error) { + input.VCable = c.getQualifiedName(input.VCable) + input.ParentPool = c.getQualifiedParentPoolName(input.ParentPool) + var assocInfo IPAssociationInfo + if err := c.createResource(input, &assocInfo); err != nil { + return nil, err + } + + return c.success(&assocInfo) +} + +type GetIPAssociationInput struct { + // The three-part name of the IP Association + // Required. + Name string `json:"name"` +} + +// GetIPAssociation retrieves the IP association with the given name. +func (c *IPAssociationsClient) GetIPAssociation(input *GetIPAssociationInput) (*IPAssociationInfo, error) { + var assocInfo IPAssociationInfo + if err := c.getResource(input.Name, &assocInfo); err != nil { + return nil, err + } + + return c.success(&assocInfo) +} + +type DeleteIPAssociationInput struct { + // The three-part name of the IP Association + // Required. + Name string `json:"name"` +} + +// DeleteIPAssociation deletes the IP association with the given name. 
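+// A hedged usage sketch (client is assumed to be a configured *Client; the
+// association name is illustrative):
+//
+//	err := client.IPAssociations().DeleteIPAssociation(&DeleteIPAssociationInput{
+//		Name: "example-association",
+//	})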
+func (c *IPAssociationsClient) DeleteIPAssociation(input *DeleteIPAssociationInput) error { + return c.deleteResource(input.Name) +} + +func (c *IPAssociationsClient) getQualifiedParentPoolName(parentpool string) string { + parts := strings.Split(parentpool, ":") + pooltype := parts[0] + name := parts[1] + return fmt.Sprintf("%s:%s", pooltype, c.getQualifiedName(name)) +} + +func (c *IPAssociationsClient) unqualifyParentPoolName(parentpool *string) { + parts := strings.Split(*parentpool, ":") + pooltype := parts[0] + name := parts[1] + *parentpool = fmt.Sprintf("%s:%s", pooltype, c.getUnqualifiedName(name)) +} + +// Unqualifies identifiers +func (c *IPAssociationsClient) success(assocInfo *IPAssociationInfo) (*IPAssociationInfo, error) { + c.unqualify(&assocInfo.Name, &assocInfo.VCable) + c.unqualifyParentPoolName(&assocInfo.ParentPool) + return assocInfo, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_network_exchange.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_network_exchange.go new file mode 100644 index 000000000..9a20380f8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_network_exchange.go @@ -0,0 +1,99 @@ +package compute + +const ( + IPNetworkExchangeDescription = "ip network exchange" + IPNetworkExchangeContainerPath = "/network/v1/ipnetworkexchange/" + IPNetworkExchangeResourcePath = "/network/v1/ipnetworkexchange" +) + +type IPNetworkExchangesClient struct { + ResourceClient +} + +// IPNetworkExchanges() returns an IPNetworkExchangesClient that can be used to access the +// necessary CRUD functions for IP Network Exchanges. +func (c *Client) IPNetworkExchanges() *IPNetworkExchangesClient { + return &IPNetworkExchangesClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: IPNetworkExchangeDescription, + ContainerPath: IPNetworkExchangeContainerPath, + ResourceRootPath: IPNetworkExchangeResourcePath, + }, + } +} + +// IPNetworkExchangeInfo contains the exported fields necessary to hold all the information about an +// IP Network Exchange +type IPNetworkExchangeInfo struct { + // The name of the IP Network Exchange + Name string `json:"name"` + // Description of the IP Network Exchange + Description string `json:"description"` + // Slice of tags associated with the IP Network Exchange + Tags []string `json:"tags"` + // Uniform Resource Identifier for the IP Network Exchange + Uri string `json:"uri"` +} + +type CreateIPNetworkExchangeInput struct { + // The name of the IP Network Exchange to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Description of the IPNetworkExchange + // Optional + Description string `json:"description"` + + // String slice of tags to apply to the IP Network Exchange object + // Optional + Tags []string `json:"tags"` +} + +// Create a new IP Network Exchange from an IPNetworkExchangesClient and an input struct. +// Returns a populated Info struct for the IP Network Exchange, and any errors +func (c *IPNetworkExchangesClient) CreateIPNetworkExchange(input *CreateIPNetworkExchangeInput) (*IPNetworkExchangeInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo IPNetworkExchangeInfo + if err := c.createResource(&input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type GetIPNetworkExchangeInput struct { + // The name of the IP Network Exchange to query for. 
Case-sensitive + // Required + Name string `json:"name"` +} + +// Returns a populated IPNetworkExchangeInfo struct from an input struct +func (c *IPNetworkExchangesClient) GetIPNetworkExchange(input *GetIPNetworkExchangeInput) (*IPNetworkExchangeInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo IPNetworkExchangeInfo + if err := c.getResource(input.Name, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type DeleteIPNetworkExchangeInput struct { + // The name of the IP Network Exchange to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *IPNetworkExchangesClient) DeleteIPNetworkExchange(input *DeleteIPNetworkExchangeInput) error { + return c.deleteResource(input.Name) +} + +// Unqualifies any qualified fields in the IPNetworkExchangeInfo struct +func (c *IPNetworkExchangesClient) success(info *IPNetworkExchangeInfo) (*IPNetworkExchangeInfo, error) { + c.unqualify(&info.Name) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_networks.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_networks.go new file mode 100644 index 000000000..3705eef29 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_networks.go @@ -0,0 +1,186 @@ +package compute + +const ( + IPNetworkDescription = "ip network" + IPNetworkContainerPath = "/network/v1/ipnetwork/" + IPNetworkResourcePath = "/network/v1/ipnetwork" +) + +type IPNetworksClient struct { + ResourceClient +} + +// IPNetworks() returns an IPNetworksClient that can be used to access the +// necessary CRUD functions for IP Networks. +func (c *Client) IPNetworks() *IPNetworksClient { + return &IPNetworksClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: IPNetworkDescription, + ContainerPath: IPNetworkContainerPath, + ResourceRootPath: IPNetworkResourcePath, + }, + } +} + +// IPNetworkInfo contains the exported fields necessary to hold all the information about an +// IP Network +type IPNetworkInfo struct { + // The name of the IP Network + Name string `json:"name"` + // The CIDR IPv4 prefix associated with the IP Network + IPAddressPrefix string `json:"ipAddressPrefix"` + // Name of the IP Network Exchange associated with the IP Network + IPNetworkExchange string `json:"ipNetworkExchange,omitempty"` + // Description of the IP Network + Description string `json:"description"` + // Whether public internet access was enabled using NAPT for VNICs without any public IP reservation + PublicNaptEnabled bool `json:"publicNaptEnabledFlag"` + // Slice of tags associated with the IP Network + Tags []string `json:"tags"` + // Uniform Resource Identifier for the IP Network + Uri string `json:"uri"` +} + +type CreateIPNetworkInput struct { + // The name of the IP Network to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Specify the size of the IP Subnet. It is a range of IPv4 addresses assigned in the virtual + // network, in CIDR address prefix format. + // While specifying the IP address prefix take care of the following points: + // + //* These IP addresses aren't part of the common pool of Oracle-provided IP addresses used by the shared network. + // + //* There's no conflict with the range of IP addresses used in another IP network, the IP addresses used your on-premises network, or with the range of private IP addresses used in the shared network. 
If IP networks with overlapping IP subnets are linked to an IP exchange, packets going to and from those IP networks are dropped. + // + //* The upper limit of the CIDR block size for an IP network is /16. + // + //Note: The first IP address of any IP network is reserved for the default gateway, the DHCP server, and the DNS server of that IP network. + // Required + IPAddressPrefix string `json:"ipAddressPrefix"` + + //Specify the IP network exchange to which the IP network belongs. + //You can add an IP network to only one IP network exchange, but an IP network exchange + //can include multiple IP networks. An IP network exchange enables access between IP networks + //that have non-overlapping addresses, so that instances on these networks can exchange packets + //with each other without NAT. + // Optional + IPNetworkExchange string `json:"ipNetworkExchange,omitempty"` + + // Description of the IPNetwork + // Optional + Description string `json:"description"` + + // Enable public internet access using NAPT for VNICs without any public IP reservation + // Optional + PublicNaptEnabled bool `json:"publicNaptEnabledFlag"` + + // String slice of tags to apply to the IP Network object + // Optional + Tags []string `json:"tags"` +} + +// Create a new IP Network from an IPNetworksClient and an input struct. +// Returns a populated Info struct for the IP Network, and any errors +func (c *IPNetworksClient) CreateIPNetwork(input *CreateIPNetworkInput) (*IPNetworkInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.IPNetworkExchange = c.getQualifiedName(input.IPNetworkExchange) + + var ipInfo IPNetworkInfo + if err := c.createResource(&input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type GetIPNetworkInput struct { + // The name of the IP Network to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +// Returns a populated IPNetworkInfo struct from an input struct +func (c *IPNetworksClient) GetIPNetwork(input *GetIPNetworkInput) (*IPNetworkInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo IPNetworkInfo + if err := c.getResource(input.Name, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type UpdateIPNetworkInput struct { + // The name of the IP Network to update. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Specify the size of the IP Subnet. It is a range of IPv4 addresses assigned in the virtual + // network, in CIDR address prefix format. + // While specifying the IP address prefix take care of the following points: + // + //* These IP addresses aren't part of the common pool of Oracle-provided IP addresses used by the shared network. + // + //* There's no conflict with the range of IP addresses used in another IP network, the IP addresses used your on-premises network, or with the range of private IP addresses used in the shared network. If IP networks with overlapping IP subnets are linked to an IP exchange, packets going to and from those IP networks are dropped. + // + //* The upper limit of the CIDR block size for an IP network is /16. + // + //Note: The first IP address of any IP network is reserved for the default gateway, the DHCP server, and the DNS server of that IP network. + // Required + IPAddressPrefix string `json:"ipAddressPrefix"` + + //Specify the IP network exchange to which the IP network belongs. 
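+	//(For example, the unqualified name of an exchange created with
+	//CreateIPNetworkExchange, such as "example-exchange"; the name is illustrative.)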
+ //You can add an IP network to only one IP network exchange, but an IP network exchange + //can include multiple IP networks. An IP network exchange enables access between IP networks + //that have non-overlapping addresses, so that instances on these networks can exchange packets + //with each other without NAT. + // Optional + IPNetworkExchange string `json:"ipNetworkExchange,omitempty"` + + // Description of the IPNetwork + // Optional + Description string `json:"description"` + + // Enable public internet access using NAPT for VNICs without any public IP reservation + // Optional + PublicNaptEnabled bool `json:"publicNaptEnabledFlag"` + + // String slice of tags to apply to the IP Network object + // Optional + Tags []string `json:"tags"` +} + +func (c *IPNetworksClient) UpdateIPNetwork(input *UpdateIPNetworkInput) (*IPNetworkInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.IPNetworkExchange = c.getQualifiedName(input.IPNetworkExchange) + + var ipInfo IPNetworkInfo + if err := c.updateResource(input.Name, &input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type DeleteIPNetworkInput struct { + // The name of the IP Network to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *IPNetworksClient) DeleteIPNetwork(input *DeleteIPNetworkInput) error { + return c.deleteResource(input.Name) +} + +// Unqualifies any qualified fields in the IPNetworkInfo struct +func (c *IPNetworksClient) success(info *IPNetworkInfo) (*IPNetworkInfo, error) { + c.unqualify(&info.Name) + c.unqualify(&info.IPNetworkExchange) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_reservations.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_reservations.go new file mode 100644 index 000000000..30cb47cd6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_reservations.go @@ -0,0 +1,147 @@ +package compute + +// IPReservationsClient is a client for the IP Reservations functions of the Compute API. +type IPReservationsClient struct { + *ResourceClient +} + +const ( + IPReservationDesc = "ip reservation" + IPReservationContainerPath = "/ip/reservation/" + IPReservataionResourcePath = "/ip/reservation" +) + +// IPReservations obtains an IPReservationsClient which can be used to access to the +// IP Reservations functions of the Compute API +func (c *Client) IPReservations() *IPReservationsClient { + return &IPReservationsClient{ + ResourceClient: &ResourceClient{ + Client: c, + ResourceDescription: IPReservationDesc, + ContainerPath: IPReservationContainerPath, + ResourceRootPath: IPReservataionResourcePath, + }} +} + +type IPReservationPool string + +const ( + PublicReservationPool IPReservationPool = "/oracle/public/ippool" +) + +// IPReservationInput describes an existing IP reservation. +type IPReservation struct { + // Shows the default account for your identity domain. + Account string `json:"account"` + // Public IP address. + IP string `json:"ip"` + // The three-part name of the IP Reservation (/Compute-identity_domain/user/object). + Name string `json:"name"` + // Pool of public IP addresses + ParentPool IPReservationPool `json:"parentpool"` + // Is the IP Reservation Persistent (i.e. static) or not (i.e. Dynamic)? + Permanent bool `json:"permanent"` + // A comma-separated list of strings which helps you to identify IP reservation. 
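+	// (for example, []string{"dev", "primary"}; values are illustrative)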
+ Tags []string `json:"tags"` + // Uniform Resource Identifier + Uri string `json:"uri"` + // Is the IP reservation associated with an instance? + Used bool `json:"used"` +} + +// CreateIPReservationInput defines an IP reservation to be created. +type CreateIPReservationInput struct { + // The name of the object + // If you don't specify a name for this object, then the name is generated automatically. + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. + // Object names are case-sensitive. + // Optional + Name string `json:"name"` + // Pool of public IP addresses. This must be set to `ippool` + // Required + ParentPool IPReservationPool `json:"parentpool"` + // Is the IP Reservation Persistent (i.e. static) or not (i.e. Dynamic)? + // Required + Permanent bool `json:"permanent"` + // A comma-separated list of strings which helps you to identify IP reservations. + // Optional + Tags []string `json:"tags"` +} + +// CreateIPReservation creates a new IP reservation with the given parentpool, tags and permanent flag. +func (c *IPReservationsClient) CreateIPReservation(input *CreateIPReservationInput) (*IPReservation, error) { + var ipInput IPReservation + + input.Name = c.getQualifiedName(input.Name) + if err := c.createResource(input, &ipInput); err != nil { + return nil, err + } + + return c.success(&ipInput) +} + +// GetIPReservationInput defines an IP Reservation to get +type GetIPReservationInput struct { + // The name of the IP Reservation + // Required + Name string +} + +// GetIPReservation retrieves the IP reservation with the given name. +func (c *IPReservationsClient) GetIPReservation(input *GetIPReservationInput) (*IPReservation, error) { + var ipInput IPReservation + + input.Name = c.getQualifiedName(input.Name) + if err := c.getResource(input.Name, &ipInput); err != nil { + return nil, err + } + + return c.success(&ipInput) +} + +// UpdateIPReservationInput defines an IP Reservation to be updated +type UpdateIPReservationInput struct { + // The name of the object + // If you don't specify a name for this object, then the name is generated automatically. + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. + // Object names are case-sensitive. + // Required + Name string `json:"name"` + // Pool of public IP addresses. + // Required + ParentPool IPReservationPool `json:"parentpool"` + // Is the IP Reservation Persistent (i.e. static) or not (i.e. Dynamic)? + // Required + Permanent bool `json:"permanent"` + // A comma-separated list of strings which helps you to identify IP reservations. + // Optional + Tags []string `json:"tags"` +} + +// UpdateIPReservation updates the IP reservation. +func (c *IPReservationsClient) UpdateIPReservation(input *UpdateIPReservationInput) (*IPReservation, error) { + var updateOutput IPReservation + input.Name = c.getQualifiedName(input.Name) + if err := c.updateResource(input.Name, input, &updateOutput); err != nil { + return nil, err + } + return c.success(&updateOutput) +} + +// DeleteIPReservationInput defines an IP Reservation to delete +type DeleteIPReservationInput struct { + // The name of the IP Reservation + // Required + Name string +} + +// DeleteIPReservation deletes the IP reservation with the given name. 
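+// A minimal usage sketch (assuming a configured *Client named client; the
+// reservation name is illustrative):
+//
+//	err := client.IPReservations().DeleteIPReservation(&DeleteIPReservationInput{
+//		Name: "example-reservation",
+//	})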
+func (c *IPReservationsClient) DeleteIPReservation(input *DeleteIPReservationInput) error {
+	input.Name = c.getQualifiedName(input.Name)
+	return c.deleteResource(input.Name)
+}
+
+func (c *IPReservationsClient) success(result *IPReservation) (*IPReservation, error) {
+	c.unqualify(&result.Name)
+	return result, nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/logging.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/logging.go
new file mode 100644
index 000000000..8fde2e5f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/logging.go
@@ -0,0 +1,28 @@
+package compute
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/hashicorp/go-oracle-terraform/opc"
+)
+
+// Log a string if debug logs are on
+func (c *Client) debugLogString(str string) {
+	if c.loglevel != opc.LogDebug {
+		return
+	}
+	c.logger.Log(fmt.Sprintf("[DEBUG]: %s", str))
+}
+
+// Log an HTTP request, including its body, if debug logs are on
+func (c *Client) debugLogReq(req *http.Request) {
+	// Don't need to log this if not debugging
+	if c.loglevel != opc.LogDebug {
+		return
+	}
+	// Requests without a body (e.g. GET and DELETE) have nothing to dump
+	if req.Body == nil {
+		c.logger.Log(fmt.Sprintf("DEBUG: HTTP %s Req %s", req.Method, req.URL.String()))
+		return
+	}
+	buf := new(bytes.Buffer)
+	buf.ReadFrom(req.Body)
+	// Reading consumed the request body; restore it so the request can still be sent
+	req.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes()))
+	c.logger.Log(fmt.Sprintf("DEBUG: HTTP %s Req %s: %s",
+		req.Method, req.URL.String(), buf.String()))
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/resource_client.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/resource_client.go
new file mode 100644
index 000000000..7b133c634
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/resource_client.go
@@ -0,0 +1,94 @@
+package compute
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+// ResourceClient is an AuthenticatedClient with some additional information about the resources to be addressed.
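+// Each service-specific client in this package embeds a ResourceClient so the
+// shared create/get/update/delete helpers below operate against that service's
+// paths. A hedged sketch of how one is typically wired up (paths illustrative):
+//
+//	rc := &ResourceClient{
+//		Client:              c,
+//		ResourceDescription: "example resource",
+//		ContainerPath:       "/example/v1/resource/",
+//		ResourceRootPath:    "/example/v1/resource",
+//	}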
+type ResourceClient struct { + *Client + ResourceDescription string + ContainerPath string + ResourceRootPath string +} + +func (c *ResourceClient) createResource(requestBody interface{}, responseBody interface{}) error { + resp, err := c.executeRequest("POST", c.ContainerPath, requestBody) + if err != nil { + return err + } + + return c.unmarshalResponseBody(resp, responseBody) +} + +func (c *ResourceClient) updateResource(name string, requestBody interface{}, responseBody interface{}) error { + resp, err := c.executeRequest("PUT", c.getObjectPath(c.ResourceRootPath, name), requestBody) + if err != nil { + return err + } + + return c.unmarshalResponseBody(resp, responseBody) +} + +func (c *ResourceClient) getResource(name string, responseBody interface{}) error { + var objectPath string + if name != "" { + objectPath = c.getObjectPath(c.ResourceRootPath, name) + } else { + objectPath = c.ResourceRootPath + } + resp, err := c.executeRequest("GET", objectPath, nil) + if err != nil { + return err + } + + return c.unmarshalResponseBody(resp, responseBody) +} + +func (c *ResourceClient) deleteResource(name string) error { + var objectPath string + if name != "" { + objectPath = c.getObjectPath(c.ResourceRootPath, name) + } else { + objectPath = c.ResourceRootPath + } + _, err := c.executeRequest("DELETE", objectPath, nil) + if err != nil { + return err + } + + // No errors and no response body to write + return nil +} + +func (c *ResourceClient) unmarshalResponseBody(resp *http.Response, iface interface{}) error { + buf := new(bytes.Buffer) + buf.ReadFrom(resp.Body) + c.debugLogString(fmt.Sprintf("HTTP Resp (%d): %s", resp.StatusCode, buf.String())) + // JSON decode response into interface + var tmp interface{} + dcd := json.NewDecoder(buf) + if err := dcd.Decode(&tmp); err != nil { + return err + } + + // Use mapstructure to weakly decode into the resulting interface + msdcd, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + WeaklyTypedInput: true, + Result: iface, + TagName: "json", + }) + if err != nil { + return err + } + + if err := msdcd.Decode(tmp); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/routes.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/routes.go new file mode 100644 index 000000000..c15303fd7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/routes.go @@ -0,0 +1,153 @@ +package compute + +const ( + RoutesDescription = "IP Network Route" + RoutesContainerPath = "/network/v1/route/" + RoutesResourcePath = "/network/v1/route" +) + +type RoutesClient struct { + ResourceClient +} + +func (c *Client) Routes() *RoutesClient { + return &RoutesClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: RoutesDescription, + ContainerPath: RoutesContainerPath, + ResourceRootPath: RoutesResourcePath, + }, + } +} + +type RouteInfo struct { + // Admin distance associated with this route + AdminDistance int `json:"adminDistance"` + // Description of the route + Description string `json:"description"` + // CIDR IPv4 Prefix associated with this route + IPAddressPrefix string `json:"ipAddressPrefix"` + // Name of the route + Name string `json:"name"` + // Name of the VNIC set associated with the route + NextHopVnicSet string `json:"nextHopVnicSet"` + // Slice of Tags associated with the route + Tags []string `json:"tags,omitempty"` + // Uniform resource identifier associated with the route + Uri string `json:"uri"` +} + +type CreateRouteInput struct { + 
// Specify 0,1, or 2 as the route's administrative distance. + // If you do not specify a value, the default value is 0. + // The same prefix can be used in multiple routes. In this case, packets are routed over all the matching + // routes with the lowest administrative distance. + // In the case multiple routes with the same lowest administrative distance match, + // routing occurs over all these routes using ECMP. + // Optional + AdminDistance int `json:"adminDistance"` + // Description of the route + // Optional + Description string `json:"description"` + // The IPv4 address prefix in CIDR format, of the external network (external to the vNIC set) + // from which you want to route traffic + // Required + IPAddressPrefix string `json:"ipAddressPrefix"` + // Name of the route. + // Names can only contain alphanumeric, underscore, dash, and period characters. Case-sensitive + // Required + Name string `json:"name"` + // Name of the virtual NIC set to route matching packets to. + // Routed flows are load-balanced among all the virtual NICs in the virtual NIC set + // Required + NextHopVnicSet string `json:"nextHopVnicSet"` + // Slice of tags to be associated with the route + // Optional + Tags []string `json:"tags,omitempty"` +} + +func (c *RoutesClient) CreateRoute(input *CreateRouteInput) (*RouteInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.NextHopVnicSet = c.getQualifiedName(input.NextHopVnicSet) + + var routeInfo RouteInfo + if err := c.createResource(&input, &routeInfo); err != nil { + return nil, err + } + + return c.success(&routeInfo) +} + +type GetRouteInput struct { + // Name of the Route to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *RoutesClient) GetRoute(input *GetRouteInput) (*RouteInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var routeInfo RouteInfo + if err := c.getResource(input.Name, &routeInfo); err != nil { + return nil, err + } + return c.success(&routeInfo) +} + +type UpdateRouteInput struct { + // Specify 0,1, or 2 as the route's administrative distance. + // If you do not specify a value, the default value is 0. + // The same prefix can be used in multiple routes. In this case, packets are routed over all the matching + // routes with the lowest administrative distance. + // In the case multiple routes with the same lowest administrative distance match, + // routing occurs over all these routes using ECMP. + // Optional + AdminDistance int `json:"adminDistance"` + // Description of the route + // Optional + Description string `json:"description"` + // The IPv4 address prefix in CIDR format, of the external network (external to the vNIC set) + // from which you want to route traffic + // Required + IPAddressPrefix string `json:"ipAddressPrefix"` + // Name of the route. + // Names can only contain alphanumeric, underscore, dash, and period characters. Case-sensitive + // Required + Name string `json:"name"` + // Name of the virtual NIC set to route matching packets to. 
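+	// (an illustrative value: "example-vnicset")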
+ // Routed flows are load-balanced among all the virtual NICs in the virtual NIC set + // Required + NextHopVnicSet string `json:"nextHopVnicSet"` + // Slice of tags to be associated with the route + // Optional + Tags []string `json:"tags"` +} + +func (c *RoutesClient) UpdateRoute(input *UpdateRouteInput) (*RouteInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.NextHopVnicSet = c.getQualifiedName(input.NextHopVnicSet) + + var routeInfo RouteInfo + if err := c.updateResource(input.Name, &input, &routeInfo); err != nil { + return nil, err + } + + return c.success(&routeInfo) +} + +type DeleteRouteInput struct { + // Name of the Route to delete. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *RoutesClient) DeleteRoute(input *DeleteRouteInput) error { + return c.deleteResource(input.Name) +} + +func (c *RoutesClient) success(info *RouteInfo) (*RouteInfo, error) { + c.unqualify(&info.Name) + c.unqualify(&info.NextHopVnicSet) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/sec_rules.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/sec_rules.go new file mode 100644 index 000000000..c16e9d78b --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/sec_rules.go @@ -0,0 +1,193 @@ +package compute + +// SecRulesClient is a client for the Sec Rules functions of the Compute API. +type SecRulesClient struct { + ResourceClient +} + +// SecRules obtains a SecRulesClient which can be used to access to the +// Sec Rules functions of the Compute API +func (c *Client) SecRules() *SecRulesClient { + return &SecRulesClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: "security ip list", + ContainerPath: "/secrule/", + ResourceRootPath: "/secrule", + }} +} + +// SecRuleInfo describes an existing sec rule. +type SecRuleInfo struct { + // Set this parameter to PERMIT. + Action string `json:"action"` + // The name of the security application + Application string `json:"application"` + // A description of the sec rule + Description string `json:"description"` + // Indicates whether the security rule is enabled + Disabled bool `json:"disabled"` + // The name of the destination security list or security IP list. + DestinationList string `json:"dst_list"` + // The name of the sec rule + Name string `json:"name"` + // The name of the source security list or security IP list. + SourceList string `json:"src_list"` + // Uniform Resource Identifier for the sec rule + URI string `json:"uri"` +} + +// CreateSecRuleInput defines a sec rule to be created. +type CreateSecRuleInput struct { + // Set this parameter to PERMIT. + // Required + Action string `json:"action"` + + // The name of the security application for user-defined or predefined security applications. + // Required + Application string `json:"application"` + + // Description of the IP Network + // Optional + Description string `json:"description"` + + // Indicates whether the sec rule is enabled (set to false) or disabled (true). + // The default setting is false. + // Optional + Disabled bool `json:"disabled"` + + // The name of the destination security list or security IP list. + // + // You must use the prefix seclist: or seciplist: to identify the list type. + // + // You can specify a security IP list as the destination in a secrule, + // provided src_list is a security list that has DENY as its outbound policy. 
+ // + // You cannot specify any of the security IP lists in the /oracle/public container + // as a destination in a secrule. + // Required + DestinationList string `json:"dst_list"` + + // The name of the Sec Rule to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // The name of the source security list or security IP list. + // + // You must use the prefix seclist: or seciplist: to identify the list type. + // + // Required + SourceList string `json:"src_list"` +} + +// CreateSecRule creates a new sec rule. +func (c *SecRulesClient) CreateSecRule(createInput *CreateSecRuleInput) (*SecRuleInfo, error) { + createInput.Name = c.getQualifiedName(createInput.Name) + createInput.SourceList = c.getQualifiedListName(createInput.SourceList) + createInput.DestinationList = c.getQualifiedListName(createInput.DestinationList) + createInput.Application = c.getQualifiedName(createInput.Application) + + var ruleInfo SecRuleInfo + if err := c.createResource(createInput, &ruleInfo); err != nil { + return nil, err + } + + return c.success(&ruleInfo) +} + +// GetSecRuleInput describes the Sec Rule to get +type GetSecRuleInput struct { + // The name of the Sec Rule to query for + // Required + Name string `json:"name"` +} + +// GetSecRule retrieves the sec rule with the given name. +func (c *SecRulesClient) GetSecRule(getInput *GetSecRuleInput) (*SecRuleInfo, error) { + var ruleInfo SecRuleInfo + if err := c.getResource(getInput.Name, &ruleInfo); err != nil { + return nil, err + } + + return c.success(&ruleInfo) +} + +// UpdateSecRuleInput describes a secruity rule to update +type UpdateSecRuleInput struct { + // Set this parameter to PERMIT. + // Required + Action string `json:"action"` + + // The name of the security application for user-defined or predefined security applications. + // Required + Application string `json:"application"` + + // Description of the IP Network + // Optional + Description string `json:"description"` + + // Indicates whether the sec rule is enabled (set to false) or disabled (true). + // The default setting is false. + // Optional + Disabled bool `json:"disabled"` + + // The name of the destination security list or security IP list. + // + // You must use the prefix seclist: or seciplist: to identify the list type. + // + // You can specify a security IP list as the destination in a secrule, + // provided src_list is a security list that has DENY as its outbound policy. + // + // You cannot specify any of the security IP lists in the /oracle/public container + // as a destination in a secrule. + // Required + DestinationList string `json:"dst_list"` + + // The name of the Sec Rule to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // The name of the source security list or security IP list. + // + // You must use the prefix seclist: or seciplist: to identify the list type. + // + // Required + SourceList string `json:"src_list"` +} + +// UpdateSecRule modifies the properties of the sec rule with the given name. 
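+// A hedged usage sketch (client assumed configured; all names and lists are
+// illustrative):
+//
+//	rule, err := client.SecRules().UpdateSecRule(&UpdateSecRuleInput{
+//		Name:            "example-rule",
+//		Action:          "PERMIT",
+//		Application:     "example-application",
+//		SourceList:      "seclist:example-src-list",
+//		DestinationList: "seciplist:example-dst-list",
+//	})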
+func (c *SecRulesClient) UpdateSecRule(updateInput *UpdateSecRuleInput) (*SecRuleInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + updateInput.SourceList = c.getQualifiedListName(updateInput.SourceList) + updateInput.DestinationList = c.getQualifiedListName(updateInput.DestinationList) + updateInput.Application = c.getQualifiedName(updateInput.Application) + + var ruleInfo SecRuleInfo + if err := c.updateResource(updateInput.Name, updateInput, &ruleInfo); err != nil { + return nil, err + } + + return c.success(&ruleInfo) +} + +// DeleteSecRuleInput describes the sec rule to delete +type DeleteSecRuleInput struct { + // The name of the Sec Rule to delete. + // Required + Name string `json:"name"` +} + +// DeleteSecRule deletes the sec rule with the given name. +func (c *SecRulesClient) DeleteSecRule(deleteInput *DeleteSecRuleInput) error { + return c.deleteResource(deleteInput.Name) +} + +func (c *SecRulesClient) success(ruleInfo *SecRuleInfo) (*SecRuleInfo, error) { + ruleInfo.Name = c.getUnqualifiedName(ruleInfo.Name) + ruleInfo.SourceList = c.unqualifyListName(ruleInfo.SourceList) + ruleInfo.DestinationList = c.unqualifyListName(ruleInfo.DestinationList) + ruleInfo.Application = c.getUnqualifiedName(ruleInfo.Application) + return ruleInfo, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_applications.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_applications.go new file mode 100644 index 000000000..007e234da --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_applications.go @@ -0,0 +1,150 @@ +package compute + +// SecurityApplicationsClient is a client for the Security Application functions of the Compute API. +type SecurityApplicationsClient struct { + ResourceClient +} + +// SecurityApplications obtains a SecurityApplicationsClient which can be used to access to the +// Security Application functions of the Compute API +func (c *Client) SecurityApplications() *SecurityApplicationsClient { + return &SecurityApplicationsClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: "security application", + ContainerPath: "/secapplication/", + ResourceRootPath: "/secapplication", + }} +} + +// SecurityApplicationInfo describes an existing security application. +type SecurityApplicationInfo struct { + // A description of the security application. + Description string `json:"description"` + // The TCP or UDP destination port number. This can be a port range, such as 5900-5999 for TCP. + DPort string `json:"dport"` + // The ICMP code. + ICMPCode SecurityApplicationICMPCode `json:"icmpcode"` + // The ICMP type. + ICMPType SecurityApplicationICMPType `json:"icmptype"` + // The three-part name of the Security Application (/Compute-identity_domain/user/object). + Name string `json:"name"` + // The protocol to use. 
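+	// (for example, TCP or ICMP; see the SecurityApplicationProtocol constants below)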
+ Protocol SecurityApplicationProtocol `json:"protocol"` + // The Uniform Resource Identifier + URI string `json:"uri"` +} + +type SecurityApplicationProtocol string + +const ( + All SecurityApplicationProtocol = "all" + AH SecurityApplicationProtocol = "ah" + ESP SecurityApplicationProtocol = "esp" + ICMP SecurityApplicationProtocol = "icmp" + ICMPV6 SecurityApplicationProtocol = "icmpv6" + IGMP SecurityApplicationProtocol = "igmp" + IPIP SecurityApplicationProtocol = "ipip" + GRE SecurityApplicationProtocol = "gre" + MPLSIP SecurityApplicationProtocol = "mplsip" + OSPF SecurityApplicationProtocol = "ospf" + PIM SecurityApplicationProtocol = "pim" + RDP SecurityApplicationProtocol = "rdp" + SCTP SecurityApplicationProtocol = "sctp" + TCP SecurityApplicationProtocol = "tcp" + UDP SecurityApplicationProtocol = "udp" +) + +type SecurityApplicationICMPCode string + +const ( + Admin SecurityApplicationICMPCode = "admin" + Df SecurityApplicationICMPCode = "df" + Host SecurityApplicationICMPCode = "host" + Network SecurityApplicationICMPCode = "network" + Port SecurityApplicationICMPCode = "port" + Protocol SecurityApplicationICMPCode = "protocol" +) + +type SecurityApplicationICMPType string + +const ( + Echo SecurityApplicationICMPType = "echo" + Reply SecurityApplicationICMPType = "reply" + TTL SecurityApplicationICMPType = "ttl" + TraceRoute SecurityApplicationICMPType = "traceroute" + Unreachable SecurityApplicationICMPType = "unreachable" +) + +func (c *SecurityApplicationsClient) success(result *SecurityApplicationInfo) (*SecurityApplicationInfo, error) { + c.unqualify(&result.Name) + return result, nil +} + +// CreateSecurityApplicationInput describes the Security Application to create +type CreateSecurityApplicationInput struct { + // A description of the security application. + // Optional + Description string `json:"description"` + // The TCP or UDP destination port number. + // You can also specify a port range, such as 5900-5999 for TCP. + // This parameter isn't relevant to the icmp protocol. + // Required if the Protocol is TCP or UDP + DPort string `json:"dport"` + // The ICMP code. This parameter is relevant only if you specify ICMP as the protocol. + // If you specify icmp as the protocol and don't specify icmptype or icmpcode, then all ICMP packets are matched. + // Optional + ICMPCode SecurityApplicationICMPCode `json:"icmpcode,omitempty"` + // This parameter is relevant only if you specify ICMP as the protocol. + // If you specify icmp as the protocol and don't specify icmptype or icmpcode, then all ICMP packets are matched. + // Optional + ICMPType SecurityApplicationICMPType `json:"icmptype,omitempty"` + // The three-part name of the Security Application (/Compute-identity_domain/user/object). + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive. + // Required + Name string `json:"name"` + // The protocol to use. + // Required + Protocol SecurityApplicationProtocol `json:"protocol"` +} + +// CreateSecurityApplication creates a new security application. 
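+// A minimal usage sketch (client assumed configured; the name and port range
+// are illustrative):
+//
+//	app, err := client.SecurityApplications().CreateSecurityApplication(&CreateSecurityApplicationInput{
+//		Name:     "example-tcp-app",
+//		Protocol: TCP,
+//		DPort:    "5900-5999",
+//	})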
+func (c *SecurityApplicationsClient) CreateSecurityApplication(input *CreateSecurityApplicationInput) (*SecurityApplicationInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var appInfo SecurityApplicationInfo + if err := c.createResource(&input, &appInfo); err != nil { + return nil, err + } + + return c.success(&appInfo) +} + +// GetSecurityApplicationInput describes the Security Application to obtain +type GetSecurityApplicationInput struct { + // The three-part name of the Security Application (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// GetSecurityApplication retrieves the security application with the given name. +func (c *SecurityApplicationsClient) GetSecurityApplication(input *GetSecurityApplicationInput) (*SecurityApplicationInfo, error) { + var appInfo SecurityApplicationInfo + if err := c.getResource(input.Name, &appInfo); err != nil { + return nil, err + } + + return c.success(&appInfo) +} + +// DeleteSecurityApplicationInput describes the Security Application to delete +type DeleteSecurityApplicationInput struct { + // The three-part name of the Security Application (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// DeleteSecurityApplication deletes the security application with the given name. +func (c *SecurityApplicationsClient) DeleteSecurityApplication(input *DeleteSecurityApplicationInput) error { + return c.deleteResource(input.Name) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_associations.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_associations.go new file mode 100644 index 000000000..ea30d7813 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_associations.go @@ -0,0 +1,95 @@ +package compute + +// SecurityAssociationsClient is a client for the Security Association functions of the Compute API. +type SecurityAssociationsClient struct { + ResourceClient +} + +// SecurityAssociations obtains a SecurityAssociationsClient which can be used to access to the +// Security Association functions of the Compute API +func (c *Client) SecurityAssociations() *SecurityAssociationsClient { + return &SecurityAssociationsClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: "security association", + ContainerPath: "/secassociation/", + ResourceRootPath: "/secassociation", + }} +} + +// SecurityAssociationInfo describes an existing security association. +type SecurityAssociationInfo struct { + // The three-part name of the Security Association (/Compute-identity_domain/user/object). + Name string `json:"name"` + // The name of the Security List that you want to associate with the instance. + SecList string `json:"seclist"` + // vCable of the instance that you want to associate with the security list. + VCable string `json:"vcable"` + // Uniform Resource Identifier + URI string `json:"uri"` +} + +// CreateSecurityAssociationInput defines a security association to be created. +type CreateSecurityAssociationInput struct { + // The three-part name of the Security Association (/Compute-identity_domain/user/object). + // If you don't specify a name for this object, then the name is generated automatically. + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive. + // Optional + Name string `json:"name"` + // The name of the Security list that you want to associate with the instance. 
+ // Required + SecList string `json:"seclist"` + // The name of the vCable of the instance that you want to associate with the security list. + // Required + VCable string `json:"vcable"` +} + +// CreateSecurityAssociation creates a security association between the given VCable and security list. +func (c *SecurityAssociationsClient) CreateSecurityAssociation(createInput *CreateSecurityAssociationInput) (*SecurityAssociationInfo, error) { + if createInput.Name != "" { + createInput.Name = c.getQualifiedName(createInput.Name) + } + createInput.VCable = c.getQualifiedName(createInput.VCable) + createInput.SecList = c.getQualifiedName(createInput.SecList) + + var assocInfo SecurityAssociationInfo + if err := c.createResource(&createInput, &assocInfo); err != nil { + return nil, err + } + + return c.success(&assocInfo) +} + +// GetSecurityAssociationInput describes the security association to get +type GetSecurityAssociationInput struct { + // The three-part name of the Security Association (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// GetSecurityAssociation retrieves the security association with the given name. +func (c *SecurityAssociationsClient) GetSecurityAssociation(getInput *GetSecurityAssociationInput) (*SecurityAssociationInfo, error) { + var assocInfo SecurityAssociationInfo + if err := c.getResource(getInput.Name, &assocInfo); err != nil { + return nil, err + } + + return c.success(&assocInfo) +} + +// DeleteSecurityAssociationInput describes the security association to delete +type DeleteSecurityAssociationInput struct { + // The three-part name of the Security Association (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// DeleteSecurityAssociation deletes the security association with the given name. +func (c *SecurityAssociationsClient) DeleteSecurityAssociation(deleteInput *DeleteSecurityAssociationInput) error { + return c.deleteResource(deleteInput.Name) +} + +func (c *SecurityAssociationsClient) success(assocInfo *SecurityAssociationInfo) (*SecurityAssociationInfo, error) { + c.unqualify(&assocInfo.Name, &assocInfo.SecList, &assocInfo.VCable) + return assocInfo, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_ip_lists.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_ip_lists.go new file mode 100644 index 000000000..08314cd12 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_ip_lists.go @@ -0,0 +1,113 @@ +package compute + +// SecurityIPListsClient is a client for the Security IP List functions of the Compute API. +type SecurityIPListsClient struct { + ResourceClient +} + +// SecurityIPLists obtains a SecurityIPListsClient which can be used to access to the +// Security IP List functions of the Compute API +func (c *Client) SecurityIPLists() *SecurityIPListsClient { + return &SecurityIPListsClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: "security ip list", + ContainerPath: "/seciplist/", + ResourceRootPath: "/seciplist", + }} +} + +// SecurityIPListInfo describes an existing security IP list. +type SecurityIPListInfo struct { + // A description of the security IP list. + Description string `json:"description"` + // The three-part name of the object (/Compute-identity_domain/user/object). + Name string `json:"name"` + // A comma-separated list of the subnets (in CIDR format) or IPv4 addresses for which you want to create this security IP list. 
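+	// (for example, []string{"10.0.0.0/16", "192.168.1.4"}; values are illustrative)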
+ SecIPEntries []string `json:"secipentries"` + // Uniform Resource Identifier + URI string `json:"uri"` +} + +// CreateSecurityIPListInput defines a security IP list to be created. +type CreateSecurityIPListInput struct { + // A description of the security IP list. + // Optional + Description string `json:"description"` + // The three-part name of the object (/Compute-identity_domain/user/object). + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive. + // Required + Name string `json:"name"` + // A comma-separated list of the subnets (in CIDR format) or IPv4 addresses for which you want to create this security IP list. + // Required + SecIPEntries []string `json:"secipentries"` +} + +// CreateSecurityIPList creates a security IP list with the given name and entries. +func (c *SecurityIPListsClient) CreateSecurityIPList(createInput *CreateSecurityIPListInput) (*SecurityIPListInfo, error) { + createInput.Name = c.getQualifiedName(createInput.Name) + var listInfo SecurityIPListInfo + if err := c.createResource(createInput, &listInfo); err != nil { + return nil, err + } + + return c.success(&listInfo) +} + +// GetSecurityIPListInput describes the Security IP List to obtain +type GetSecurityIPListInput struct { + // The three-part name of the object (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// GetSecurityIPList gets the security IP list with the given name. +func (c *SecurityIPListsClient) GetSecurityIPList(getInput *GetSecurityIPListInput) (*SecurityIPListInfo, error) { + var listInfo SecurityIPListInfo + if err := c.getResource(getInput.Name, &listInfo); err != nil { + return nil, err + } + + return c.success(&listInfo) +} + +// UpdateSecurityIPListInput describes the security ip list to update +type UpdateSecurityIPListInput struct { + // A description of the security IP list. + // Optional + Description string `json:"description"` + // The three-part name of the object (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` + // A comma-separated list of the subnets (in CIDR format) or IPv4 addresses for which you want to create this security IP list. + // Required + SecIPEntries []string `json:"secipentries"` +} + +// UpdateSecurityIPList modifies the entries in the security IP list with the given name. +func (c *SecurityIPListsClient) UpdateSecurityIPList(updateInput *UpdateSecurityIPListInput) (*SecurityIPListInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + var listInfo SecurityIPListInfo + if err := c.updateResource(updateInput.Name, updateInput, &listInfo); err != nil { + return nil, err + } + + return c.success(&listInfo) +} + +// DeleteSecurityIPListInput describes the security ip list to delete. +type DeleteSecurityIPListInput struct { + // The three-part name of the object (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// DeleteSecurityIPList deletes the security IP list with the given name. 
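+// A hedged usage sketch (client assumed configured; the list name is illustrative):
+//
+//	err := client.SecurityIPLists().DeleteSecurityIPList(&DeleteSecurityIPListInput{
+//		Name: "example-ip-list",
+//	})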
+func (c *SecurityIPListsClient) DeleteSecurityIPList(deleteInput *DeleteSecurityIPListInput) error {
+ return c.deleteResource(deleteInput.Name)
+}
+
+func (c *SecurityIPListsClient) success(listInfo *SecurityIPListInfo) (*SecurityIPListInfo, error) {
+ c.unqualify(&listInfo.Name)
+ return listInfo, nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go
new file mode 100644
index 000000000..fd654fac6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go
@@ -0,0 +1,131 @@
+package compute
+
+// SecurityListsClient is a client for the Security List functions of the Compute API.
+type SecurityListsClient struct {
+ ResourceClient
+}
+
+// SecurityLists obtains a SecurityListsClient which can be used to access the
+// Security List functions of the Compute API.
+func (c *Client) SecurityLists() *SecurityListsClient {
+ return &SecurityListsClient{
+ ResourceClient: ResourceClient{
+ Client: c,
+ ResourceDescription: "security list",
+ ContainerPath: "/seclist/",
+ ResourceRootPath: "/seclist",
+ }}
+}
+
+// SecurityListPolicy describes a policy a security list applies to traffic.
+type SecurityListPolicy string
+
+const (
+ SecurityListPolicyDeny SecurityListPolicy = "deny"
+ SecurityListPolicyReject SecurityListPolicy = "reject"
+ SecurityListPolicyPermit SecurityListPolicy = "permit"
+)
+
+// SecurityListInfo describes an existing security list.
+type SecurityListInfo struct {
+ // Shows the default account for your identity domain.
+ Account string `json:"account"`
+ // A description of the security list.
+ Description string `json:"description"`
+ // The three-part name of the security list (/Compute-identity_domain/user/object).
+ Name string `json:"name"`
+ // The policy for outbound traffic from the security list.
+ OutboundCIDRPolicy SecurityListPolicy `json:"outbound_cidr_policy"`
+ // The policy for inbound traffic to the security list.
+ Policy SecurityListPolicy `json:"policy"`
+ // Uniform Resource Identifier
+ URI string `json:"uri"`
+}
+
+// CreateSecurityListInput defines a security list to be created.
+type CreateSecurityListInput struct {
+ // A description of the security list.
+ // Optional
+ Description string `json:"description"`
+ // The three-part name of the Security List (/Compute-identity_domain/user/object).
+ // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive.
+ // Required
+ Name string `json:"name"`
+ // The policy for outbound traffic from the security list.
+ // Optional (defaults to `permit`)
+ OutboundCIDRPolicy SecurityListPolicy `json:"outbound_cidr_policy"`
+ // The policy for inbound traffic to the security list.
+ // Optional (defaults to `deny`)
+ Policy SecurityListPolicy `json:"policy"`
+}
+
+// CreateSecurityList creates a new security list with the given name, policy and outbound CIDR policy.
+func (c *SecurityListsClient) CreateSecurityList(createInput *CreateSecurityListInput) (*SecurityListInfo, error) {
+ createInput.Name = c.getQualifiedName(createInput.Name)
+ var listInfo SecurityListInfo
+ if err := c.createResource(createInput, &listInfo); err != nil {
+ return nil, err
+ }
+
+ return c.success(&listInfo)
+}
+
+// GetSecurityListInput describes the security list you want to get.
+type GetSecurityListInput struct {
+ // The three-part name of the Security List (/Compute-identity_domain/user/object).
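+ // For example "/Compute-acme/jack.jones@example.com/allowed_video_servers" (an illustrative, hypothetical name).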
+ // Required
+ Name string `json:"name"`
+}
+
+// GetSecurityList retrieves the security list with the given name.
+func (c *SecurityListsClient) GetSecurityList(getInput *GetSecurityListInput) (*SecurityListInfo, error) {
+ var listInfo SecurityListInfo
+ if err := c.getResource(getInput.Name, &listInfo); err != nil {
+ return nil, err
+ }
+
+ return c.success(&listInfo)
+}
+
+// UpdateSecurityListInput defines what to update in a security list.
+type UpdateSecurityListInput struct {
+ // A description of the security list.
+ // Optional
+ Description string `json:"description"`
+ // The three-part name of the Security List (/Compute-identity_domain/user/object).
+ // Required
+ Name string `json:"name"`
+ // The policy for outbound traffic from the security list.
+ // Optional (defaults to `permit`)
+ OutboundCIDRPolicy SecurityListPolicy `json:"outbound_cidr_policy"`
+ // The policy for inbound traffic to the security list.
+ // Optional (defaults to `deny`)
+ Policy SecurityListPolicy `json:"policy"`
+}
+
+// UpdateSecurityList updates the description, inbound policy, and outbound CIDR policy of the security list with the given name.
+func (c *SecurityListsClient) UpdateSecurityList(updateInput *UpdateSecurityListInput) (*SecurityListInfo, error) {
+ updateInput.Name = c.getQualifiedName(updateInput.Name)
+ var listInfo SecurityListInfo
+ if err := c.updateResource(updateInput.Name, updateInput, &listInfo); err != nil {
+ return nil, err
+ }
+
+ return c.success(&listInfo)
+}
+
+// DeleteSecurityListInput describes the security list to destroy.
+type DeleteSecurityListInput struct {
+ // The three-part name of the Security List (/Compute-identity_domain/user/object).
+ // Required
+ Name string `json:"name"`
+}
+
+// DeleteSecurityList deletes the security list with the given name.
+func (c *SecurityListsClient) DeleteSecurityList(deleteInput *DeleteSecurityListInput) error {
+ return c.deleteResource(deleteInput.Name)
+}
+
+func (c *SecurityListsClient) success(listInfo *SecurityListInfo) (*SecurityListInfo, error) {
+ c.unqualify(&listInfo.Name)
+ return listInfo, nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_protocols.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_protocols.go
new file mode 100644
index 000000000..e54aeef89
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_protocols.go
@@ -0,0 +1,187 @@
+package compute
+
+const (
+ SecurityProtocolDescription = "security protocol"
+ SecurityProtocolContainerPath = "/network/v1/secprotocol/"
+ SecurityProtocolResourcePath = "/network/v1/secprotocol"
+)
+
+// SecurityProtocolsClient is a client for the Security Protocol functions of the Compute API.
+type SecurityProtocolsClient struct {
+ ResourceClient
+}
+
+// SecurityProtocols returns a SecurityProtocolsClient that can be used to access the
+// necessary CRUD functions for Security Protocols.
+func (c *Client) SecurityProtocols() *SecurityProtocolsClient {
+ return &SecurityProtocolsClient{
+ ResourceClient: ResourceClient{
+ Client: c,
+ ResourceDescription: SecurityProtocolDescription,
+ ContainerPath: SecurityProtocolContainerPath,
+ ResourceRootPath: SecurityProtocolResourcePath,
+ },
+ }
+}
+
+// SecurityProtocolInfo contains the exported fields necessary to hold all the information about a
+// Security Protocol
+type SecurityProtocolInfo struct {
+ // List of port numbers or port range strings to match the packet's destination port.
+ DstPortSet []string `json:"dstPortSet"`
+ // Protocol used in the data portion of the IP datagram.
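+ // For example "tcp" or "udp" (illustrative values; see the permitted list documented on CreateSecurityProtocolInput below).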
+ IPProtocol string `json:"ipProtocol"` + // List of port numbers or port range strings to match the packet's source port. + SrcPortSet []string `json:"srcPortSet"` + // The name of the Security Protocol + Name string `json:"name"` + // Description of the Security Protocol + Description string `json:"description"` + // Slice of tags associated with the Security Protocol + Tags []string `json:"tags"` + // Uniform Resource Identifier for the Security Protocol + Uri string `json:"uri"` +} + +type CreateSecurityProtocolInput struct { + // The name of the Security Protocol to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Description of the SecurityProtocol + // Optional + Description string `json:"description"` + + // Enter a list of port numbers or port range strings. + //Traffic is enabled by a security rule when a packet's destination port matches the + // ports specified here. + // For TCP, SCTP, and UDP, each port is a destination transport port, between 0 and 65535, + // inclusive. For ICMP, each port is an ICMP type, between 0 and 255, inclusive. + // If no destination ports are specified, all destination ports or ICMP types are allowed. + // Optional + DstPortSet []string `json:"dstPortSet"` + + // The protocol used in the data portion of the IP datagram. + // Specify one of the permitted values or enter a number in the range 0–254 to + // represent the protocol that you want to specify. See Assigned Internet Protocol Numbers. + // Permitted values are: tcp, udp, icmp, igmp, ipip, rdp, esp, ah, gre, icmpv6, ospf, pim, sctp, + // mplsip, all. + // Traffic is enabled by a security rule when the protocol in the packet matches the + // protocol specified here. If no protocol is specified, all protocols are allowed. + // Optional + IPProtocol string `json:"ipProtocol"` + + // Enter a list of port numbers or port range strings. + // Traffic is enabled by a security rule when a packet's source port matches the + // ports specified here. + // For TCP, SCTP, and UDP, each port is a source transport port, + // between 0 and 65535, inclusive. + // For ICMP, each port is an ICMP type, between 0 and 255, inclusive. + // If no source ports are specified, all source ports or ICMP types are allowed. + // Optional + SrcPortSet []string `json:"srcPortSet"` + + // String slice of tags to apply to the Security Protocol object + // Optional + Tags []string `json:"tags"` +} + +// Create a new Security Protocol from an SecurityProtocolsClient and an input struct. +// Returns a populated Info struct for the Security Protocol, and any errors +func (c *SecurityProtocolsClient) CreateSecurityProtocol(input *CreateSecurityProtocolInput) (*SecurityProtocolInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo SecurityProtocolInfo + if err := c.createResource(&input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type GetSecurityProtocolInput struct { + // The name of the Security Protocol to query for. 
Case-sensitive + // Required + Name string `json:"name"` +} + +// Returns a populated SecurityProtocolInfo struct from an input struct +func (c *SecurityProtocolsClient) GetSecurityProtocol(input *GetSecurityProtocolInput) (*SecurityProtocolInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo SecurityProtocolInfo + if err := c.getResource(input.Name, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +// UpdateSecurityProtocolInput defines what to update in a security protocol +type UpdateSecurityProtocolInput struct { + // The name of the Security Protocol to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // Description of the SecurityProtocol + // Optional + Description string `json:"description"` + + // Enter a list of port numbers or port range strings. + //Traffic is enabled by a security rule when a packet's destination port matches the + // ports specified here. + // For TCP, SCTP, and UDP, each port is a destination transport port, between 0 and 65535, + // inclusive. For ICMP, each port is an ICMP type, between 0 and 255, inclusive. + // If no destination ports are specified, all destination ports or ICMP types are allowed. + DstPortSet []string `json:"dstPortSet"` + + // The protocol used in the data portion of the IP datagram. + // Specify one of the permitted values or enter a number in the range 0–254 to + // represent the protocol that you want to specify. See Assigned Internet Protocol Numbers. + // Permitted values are: tcp, udp, icmp, igmp, ipip, rdp, esp, ah, gre, icmpv6, ospf, pim, sctp, + // mplsip, all. + // Traffic is enabled by a security rule when the protocol in the packet matches the + // protocol specified here. If no protocol is specified, all protocols are allowed. + IPProtocol string `json:"ipProtocol"` + + // Enter a list of port numbers or port range strings. + // Traffic is enabled by a security rule when a packet's source port matches the + // ports specified here. + // For TCP, SCTP, and UDP, each port is a source transport port, + // between 0 and 65535, inclusive. + // For ICMP, each port is an ICMP type, between 0 and 255, inclusive. + // If no source ports are specified, all source ports or ICMP types are allowed. + SrcPortSet []string `json:"srcPortSet"` + + // String slice of tags to apply to the Security Protocol object + // Optional + Tags []string `json:"tags"` +} + +// UpdateSecurityProtocol update the security protocol +func (c *SecurityProtocolsClient) UpdateSecurityProtocol(updateInput *UpdateSecurityProtocolInput) (*SecurityProtocolInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + var ipInfo SecurityProtocolInfo + if err := c.updateResource(updateInput.Name, updateInput, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type DeleteSecurityProtocolInput struct { + // The name of the Security Protocol to query for. 
Case-sensitive
+ // Required
+ Name string `json:"name"`
+}
+
+// DeleteSecurityProtocol deletes the security protocol with the given name.
+func (c *SecurityProtocolsClient) DeleteSecurityProtocol(input *DeleteSecurityProtocolInput) error {
+ return c.deleteResource(input.Name)
+}
+
+// Unqualifies any qualified fields in the SecurityProtocolInfo struct
+func (c *SecurityProtocolsClient) success(info *SecurityProtocolInfo) (*SecurityProtocolInfo, error) {
+ c.unqualify(&info.Name)
+ return info, nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_rules.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_rules.go
new file mode 100644
index 000000000..025a3465a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_rules.go
@@ -0,0 +1,266 @@
+package compute
+
+const (
+ SecurityRuleDescription = "security rules"
+ SecurityRuleContainerPath = "/network/v1/secrule/"
+ SecurityRuleResourcePath = "/network/v1/secrule"
+)
+
+// SecurityRuleClient is a client for the Security Rule functions of the Compute API.
+type SecurityRuleClient struct {
+ ResourceClient
+}
+
+// SecurityRules returns a SecurityRuleClient that can be used to access the
+// necessary CRUD functions for Security Rules.
+func (c *Client) SecurityRules() *SecurityRuleClient {
+ return &SecurityRuleClient{
+ ResourceClient: ResourceClient{
+ Client: c,
+ ResourceDescription: SecurityRuleDescription,
+ ContainerPath: SecurityRuleContainerPath,
+ ResourceRootPath: SecurityRuleResourcePath,
+ },
+ }
+}
+
+// SecurityRuleInfo contains the exported fields necessary to hold all the information about a
+// Security Rule
+type SecurityRuleInfo struct {
+ // Name of the ACL that contains this rule.
+ ACL string `json:"acl"`
+ // Description of the Security Rule
+ Description string `json:"description"`
+ // List of IP address prefix set names to match the packet's destination IP address.
+ DstIpAddressPrefixSets []string `json:"dstIpAddressPrefixSets"`
+ // Name of virtual NIC set containing the packet's destination virtual NIC.
+ DstVnicSet string `json:"dstVnicSet"`
+ // Allows the security rule to be disabled.
+ Enabled bool `json:"enabledFlag"`
+ // Direction of the flow; can be "egress" or "ingress".
+ FlowDirection string `json:"flowDirection"`
+ // The name of the Security Rule
+ Name string `json:"name"`
+ // List of security protocol names to match the packet's protocol and port.
+ SecProtocols []string `json:"secProtocols"`
+ // List of multipart names of IP address prefix set to match the packet's source IP address.
+ SrcIpAddressPrefixSets []string `json:"srcIpAddressPrefixSets"`
+ // Name of virtual NIC set containing the packet's source virtual NIC.
+ SrcVnicSet string `json:"srcVnicSet"`
+ // Slice of tags associated with the Security Rule
+ Tags []string `json:"tags"`
+ // Uniform Resource Identifier for the Security Rule
+ Uri string `json:"uri"`
+}
+
+// CreateSecurityRuleInput defines a security rule to be created.
+type CreateSecurityRuleInput struct {
+ // Select the name of the access control list (ACL) that you want to add this
+ // security rule to. Security rules are applied to vNIC sets by using ACLs.
+ // Optional
+ ACL string `json:"acl,omitempty"`
+
+ // Description of the Security Rule
+ // Optional
+ Description string `json:"description"`
+
+ // A list of IP address prefix sets to which you want to permit traffic.
+ // Only packets to IP addresses in the specified IP address prefix sets are permitted.
+ // When no destination IP address prefix sets are specified, traffic to any
+ // IP address is permitted.
+ // Optional
+ DstIpAddressPrefixSets []string `json:"dstIpAddressPrefixSets"`
+
+ // The vNICset to which you want to permit traffic. 
Only packets to vNICs in the + // specified vNICset are permitted. When no destination vNICset is specified, traffic + // to any vNIC is permitted. + // Optional + DstVnicSet string `json:"dstVnicSet,omitempty"` + + // Allows the security rule to be enabled or disabled. This parameter is set to + // true by default. Specify false to disable the security rule. + // Optional + Enabled bool `json:"enabledFlag"` + + // Specify the direction of flow of traffic, which is relative to the instances, + // for this security rule. Allowed values are ingress or egress. + // An ingress packet is a packet received by a virtual NIC, for example from + // another virtual NIC or from the public Internet. + // An egress packet is a packet sent by a virtual NIC, for example to another + // virtual NIC or to the public Internet. + // Required + FlowDirection string `json:"flowDirection"` + + // The name of the Security Rule + // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. + // Object names are case-sensitive. When you specify the object name, ensure that an object + // of the same type and with the same name doesn't already exist. + // If such an object already exists, another object of the same type and with the same name won't + // be created and the existing object won't be updated. + // Required + Name string `json:"name"` + + // A list of security protocols for which you want to permit traffic. Only packets that + // match the specified protocols and ports are permitted. When no security protocols are + // specified, traffic using any protocol over any port is permitted. + // Optional + SecProtocols []string `json:"secProtocols"` + + // A list of IP address prefix sets from which you want to permit traffic. Only packets + // from IP addresses in the specified IP address prefix sets are permitted. When no source + // IP address prefix sets are specified, traffic from any IP address is permitted. + // Optional + SrcIpAddressPrefixSets []string `json:"srcIpAddressPrefixSets"` + + // The vNICset from which you want to permit traffic. Only packets from vNICs in the + // specified vNICset are permitted. When no source vNICset is specified, traffic from any + // vNIC is permitted. + // Optional + SrcVnicSet string `json:"srcVnicSet,omitempty"` + + // Strings that you can use to tag the security rule. + // Optional + Tags []string `json:"tags"` +} + +// Create a new Security Rule from an SecurityRuleClient and an input struct. +// Returns a populated Info struct for the Security Rule, and any errors +func (c *SecurityRuleClient) CreateSecurityRule(input *CreateSecurityRuleInput) (*SecurityRuleInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.ACL = c.getQualifiedName(input.ACL) + input.SrcVnicSet = c.getQualifiedName(input.SrcVnicSet) + input.DstVnicSet = c.getQualifiedName(input.DstVnicSet) + input.SrcIpAddressPrefixSets = c.getQualifiedList(input.SrcIpAddressPrefixSets) + input.DstIpAddressPrefixSets = c.getQualifiedList(input.DstIpAddressPrefixSets) + input.SecProtocols = c.getQualifiedList(input.SecProtocols) + + var securityRuleInfo SecurityRuleInfo + if err := c.createResource(&input, &securityRuleInfo); err != nil { + return nil, err + } + + return c.success(&securityRuleInfo) +} + +type GetSecurityRuleInput struct { + // The name of the Security Rule to query for. 
Case-sensitive
+ // Required
+ Name string `json:"name"`
+}
+
+// GetSecurityRule returns a populated SecurityRuleInfo struct from an input struct
+func (c *SecurityRuleClient) GetSecurityRule(input *GetSecurityRuleInput) (*SecurityRuleInfo, error) {
+ input.Name = c.getQualifiedName(input.Name)
+
+ var securityRuleInfo SecurityRuleInfo
+ if err := c.getResource(input.Name, &securityRuleInfo); err != nil {
+ return nil, err
+ }
+
+ return c.success(&securityRuleInfo)
+}
+
+// UpdateSecurityRuleInput describes a security rule to update
+type UpdateSecurityRuleInput struct {
+ // Select the name of the access control list (ACL) that you want to add this
+ // security rule to. Security rules are applied to vNIC sets by using ACLs.
+ // Optional
+ ACL string `json:"acl,omitempty"`
+
+ // Description of the Security Rule
+ // Optional
+ Description string `json:"description"`
+
+ // A list of IP address prefix sets to which you want to permit traffic.
+ // Only packets to IP addresses in the specified IP address prefix sets are permitted.
+ // When no destination IP address prefix sets are specified, traffic to any
+ // IP address is permitted.
+ // Optional
+ DstIpAddressPrefixSets []string `json:"dstIpAddressPrefixSets"`
+
+ // The vNICset to which you want to permit traffic. Only packets to vNICs in the
+ // specified vNICset are permitted. When no destination vNICset is specified, traffic
+ // to any vNIC is permitted.
+ // Optional
+ DstVnicSet string `json:"dstVnicSet,omitempty"`
+
+ // Allows the security rule to be enabled or disabled. This parameter is set to
+ // true by default. Specify false to disable the security rule.
+ // Optional
+ Enabled bool `json:"enabledFlag"`
+
+ // Specify the direction of flow of traffic, which is relative to the instances,
+ // for this security rule. Allowed values are ingress or egress.
+ // An ingress packet is a packet received by a virtual NIC, for example from
+ // another virtual NIC or from the public Internet.
+ // An egress packet is a packet sent by a virtual NIC, for example to another
+ // virtual NIC or to the public Internet.
+ // Required
+ FlowDirection string `json:"flowDirection"`
+
+ // The name of the Security Rule
+ // Object names can contain only alphanumeric characters, hyphens, underscores, and periods.
+ // Object names are case-sensitive. When you specify the object name, ensure that an object
+ // of the same type and with the same name doesn't already exist.
+ // If such an object already exists, another object of the same type and with the same name won't
+ // be created and the existing object won't be updated.
+ // Required
+ Name string `json:"name"`
+
+ // A list of security protocols for which you want to permit traffic. Only packets that
+ // match the specified protocols and ports are permitted. When no security protocols are
+ // specified, traffic using any protocol over any port is permitted.
+ // Optional
+ SecProtocols []string `json:"secProtocols"`
+
+ // A list of IP address prefix sets from which you want to permit traffic. Only packets
+ // from IP addresses in the specified IP address prefix sets are permitted. When no source
+ // IP address prefix sets are specified, traffic from any IP address is permitted.
+ // Optional
+ SrcIpAddressPrefixSets []string `json:"srcIpAddressPrefixSets"`
+
+ // The vNICset from which you want to permit traffic. Only packets from vNICs in the
+ // specified vNICset are permitted. When no source vNICset is specified, traffic from any
+ // vNIC is permitted. 
+ // Optional
+ SrcVnicSet string `json:"srcVnicSet,omitempty"`
+
+ // Strings that you can use to tag the security rule.
+ // Optional
+ Tags []string `json:"tags"`
+}
+
+// UpdateSecurityRule modifies the properties of the security rule with the given name.
+func (c *SecurityRuleClient) UpdateSecurityRule(updateInput *UpdateSecurityRuleInput) (*SecurityRuleInfo, error) {
+ updateInput.Name = c.getQualifiedName(updateInput.Name)
+ updateInput.ACL = c.getQualifiedName(updateInput.ACL)
+ updateInput.SrcVnicSet = c.getQualifiedName(updateInput.SrcVnicSet)
+ updateInput.DstVnicSet = c.getQualifiedName(updateInput.DstVnicSet)
+ updateInput.SrcIpAddressPrefixSets = c.getQualifiedList(updateInput.SrcIpAddressPrefixSets)
+ updateInput.DstIpAddressPrefixSets = c.getQualifiedList(updateInput.DstIpAddressPrefixSets)
+ updateInput.SecProtocols = c.getQualifiedList(updateInput.SecProtocols)
+
+ var securityRuleInfo SecurityRuleInfo
+ if err := c.updateResource(updateInput.Name, updateInput, &securityRuleInfo); err != nil {
+ return nil, err
+ }
+
+ return c.success(&securityRuleInfo)
+}
+
+// DeleteSecurityRuleInput describes the security rule to delete.
+type DeleteSecurityRuleInput struct {
+ // The name of the Security Rule to query for. Case-sensitive
+ // Required
+ Name string `json:"name"`
+}
+
+// DeleteSecurityRule deletes the security rule with the given name.
+func (c *SecurityRuleClient) DeleteSecurityRule(input *DeleteSecurityRuleInput) error {
+ return c.deleteResource(input.Name)
+}
+
+// Unqualifies any qualified fields in the SecurityRuleInfo struct
+func (c *SecurityRuleClient) success(info *SecurityRuleInfo) (*SecurityRuleInfo, error) {
+ c.unqualify(&info.Name, &info.ACL, &info.SrcVnicSet, &info.DstVnicSet)
+ info.SrcIpAddressPrefixSets = c.getUnqualifiedList(info.SrcIpAddressPrefixSets)
+ info.DstIpAddressPrefixSets = c.getUnqualifiedList(info.DstIpAddressPrefixSets)
+ info.SecProtocols = c.getUnqualifiedList(info.SecProtocols)
+ return info, nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go
new file mode 100644
index 000000000..8ae2b03c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go
@@ -0,0 +1,112 @@
+package compute
+
+// SSHKeysClient is a client for the SSH key functions of the Compute API.
+type SSHKeysClient struct {
+ ResourceClient
+}
+
+// SSHKeys obtains an SSHKeysClient which can be used to access the
+// SSH key functions of the Compute API.
+func (c *Client) SSHKeys() *SSHKeysClient {
+ return &SSHKeysClient{
+ ResourceClient: ResourceClient{
+ Client: c,
+ ResourceDescription: "SSH key",
+ ContainerPath: "/sshkey/",
+ ResourceRootPath: "/sshkey",
+ }}
+}
+
+// SSHKey describes an existing SSH key.
+type SSHKey struct {
+ // Indicates whether the key is enabled (true) or disabled.
+ Enabled bool `json:"enabled"`
+ // The SSH public key value.
+ Key string `json:"key"`
+ // The three-part name of the SSH Key (/Compute-identity_domain/user/object).
+ Name string `json:"name"`
+ // Unique Resource Identifier
+ URI string `json:"uri"`
+}
+
+// CreateSSHKeyInput defines an SSH key to be created.
+type CreateSSHKeyInput struct {
+ // The three-part name of the SSH Key (/Compute-identity_domain/user/object).
+ // Object names can contain only alphanumeric characters, hyphens, underscores, and periods. Object names are case-sensitive.
+ // Required
+ Name string `json:"name"`
+ // The SSH public key value.
+ // Required
+ Key string `json:"key"`
+ // Indicates whether the key must be enabled (default) or disabled. Note that disabled keys cannot be associated with instances.
+ // To explicitly enable the key, specify true. To disable the key, specify false.
+ // Optional
+ Enabled bool `json:"enabled"`
+}
+
+// CreateSSHKey creates a new SSH key with the given name, key and enabled flag.
+func (c *SSHKeysClient) CreateSSHKey(createInput *CreateSSHKeyInput) (*SSHKey, error) {
+ var keyInfo SSHKey
+ createInput.Name = c.getQualifiedName(createInput.Name)
+ if err := c.createResource(&createInput, &keyInfo); err != nil {
+ return nil, err
+ }
+
+ return c.success(&keyInfo)
+}
+
+// GetSSHKeyInput describes the SSH key to get
+type GetSSHKeyInput struct {
+ // The three-part name of the SSH Key (/Compute-identity_domain/user/object).
+ Name string `json:"name"`
+}
+
+// GetSSHKey retrieves the SSH key with the given name.
+func (c *SSHKeysClient) GetSSHKey(getInput *GetSSHKeyInput) (*SSHKey, error) {
+ var keyInfo SSHKey
+ if err := c.getResource(getInput.Name, &keyInfo); err != nil {
+ return nil, err
+ }
+
+ return c.success(&keyInfo)
+}
+
+// UpdateSSHKeyInput defines an SSH key to be updated
+type UpdateSSHKeyInput struct {
+ // The three-part name of the object (/Compute-identity_domain/user/object).
+ Name string `json:"name"`
+ // The SSH public key value.
+ // Required
+ Key string `json:"key"`
+ // Indicates whether the key must be enabled (default) or disabled. Note that disabled keys cannot be associated with instances.
+ // To explicitly enable the key, specify true. To disable the key, specify false.
+ // Optional
+ // TODO/NOTE: isn't this required?
+ Enabled bool `json:"enabled"`
+}
+
+// UpdateSSHKey updates the key and enabled flag of the SSH key with the given name.
+func (c *SSHKeysClient) UpdateSSHKey(updateInput *UpdateSSHKeyInput) (*SSHKey, error) {
+ var keyInfo SSHKey
+ updateInput.Name = c.getQualifiedName(updateInput.Name)
+ if err := c.updateResource(updateInput.Name, updateInput, &keyInfo); err != nil {
+ return nil, err
+ }
+ return c.success(&keyInfo)
+}
+
+// DeleteSSHKeyInput describes the SSH key to delete
+type DeleteSSHKeyInput struct {
+ // The three-part name of the SSH Key (/Compute-identity_domain/user/object).
+ Name string `json:"name"`
+}
+
+// DeleteSSHKey deletes the SSH key with the given name.
+func (c *SSHKeysClient) DeleteSSHKey(deleteInput *DeleteSSHKeyInput) error {
+ return c.deleteResource(deleteInput.Name)
+}
+
+func (c *SSHKeysClient) success(keyInfo *SSHKey) (*SSHKey, error) {
+ c.unqualify(&keyInfo.Name)
+ return keyInfo, nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go
new file mode 100644
index 000000000..c24cf0cc9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go
@@ -0,0 +1,158 @@
+package compute
+
+// Timeouts, in seconds, used when waiting on storage attachment operations.
+const WaitForVolumeAttachmentDeleteTimeout = 30
+const WaitForVolumeAttachmentReadyTimeout = 30
+
+// StorageAttachmentsClient is a client for the Storage Attachment functions of the Compute API.
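+//
+// A hypothetical usage sketch (the client value and the three-part names are
+// assumed, not part of this patch):
+//
+//	attachments := client.StorageAttachments()
+//	info, err := attachments.CreateStorageAttachment(&CreateStorageAttachmentInput{
+//		Index:             1, // exposed to the instance as /dev/xvdb
+//		InstanceName:      "/Compute-acme/jack/my-instance/some-instance-id",
+//		StorageVolumeName: "/Compute-acme/jack/my-volume",
+//	})
+//
+// CreateStorageAttachment blocks until the attachment reports the "attached" state.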
+type StorageAttachmentsClient struct { + ResourceClient +} + +// StorageAttachments obtains a StorageAttachmentsClient which can be used to access to the +// Storage Attachment functions of the Compute API +func (c *Client) StorageAttachments() *StorageAttachmentsClient { + return &StorageAttachmentsClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: "storage volume attachment", + ContainerPath: "/storage/attachment/", + ResourceRootPath: "/storage/attachment", + }} +} + +type StorageAttachmentState string + +const ( + Attaching StorageAttachmentState = "attaching" + Attached StorageAttachmentState = "attached" + Detaching StorageAttachmentState = "detaching" + Unavailable StorageAttachmentState = "unavailable" + Unknown StorageAttachmentState = "unknown" +) + +// StorageAttachmentInfo describes an existing storage attachment. +type StorageAttachmentInfo struct { + // Name of this attachment, generated by the server. + Name string `json:"name"` + + // Index number for the volume. The allowed range is 1-10 + // An attachment with index 1 is exposed to the instance as /dev/xvdb, an attachment with index 2 is exposed as /dev/xvdc, and so on. + Index int `json:"index"` + + // Multipart name of the instance attached to the storage volume. + InstanceName string `json:"instance_name"` + + // Multipart name of the volume attached to the instance. + StorageVolumeName string `json:"storage_volume_name"` + + // The State of the Storage Attachment + State StorageAttachmentState `json:"state"` +} + +func (c *StorageAttachmentsClient) success(attachmentInfo *StorageAttachmentInfo) (*StorageAttachmentInfo, error) { + c.unqualify(&attachmentInfo.Name, &attachmentInfo.InstanceName, &attachmentInfo.StorageVolumeName) + return attachmentInfo, nil +} + +type CreateStorageAttachmentInput struct { + // Index number for the volume. The allowed range is 1-10 + // An attachment with index 1 is exposed to the instance as /dev/xvdb, an attachment with index 2 is exposed as /dev/xvdc, and so on. + // Required. + Index int `json:"index"` + + // Multipart name of the instance to which you want to attach the volume. + // Required. + InstanceName string `json:"instance_name"` + + // Multipart name of the volume that you want to attach. + // Required. + StorageVolumeName string `json:"storage_volume_name"` +} + +// CreateStorageAttachment creates a storage attachment attaching the given volume to the given instance at the given index. +func (c *StorageAttachmentsClient) CreateStorageAttachment(input *CreateStorageAttachmentInput) (*StorageAttachmentInfo, error) { + input.InstanceName = c.getQualifiedName(input.InstanceName) + + var attachmentInfo *StorageAttachmentInfo + if err := c.createResource(&input, &attachmentInfo); err != nil { + return nil, err + } + + return c.waitForStorageAttachmentToFullyAttach(attachmentInfo.Name, WaitForVolumeAttachmentReadyTimeout) +} + +// DeleteStorageAttachmentInput represents the body of an API request to delete a Storage Attachment. +type DeleteStorageAttachmentInput struct { + // The three-part name of the Storage Attachment (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// DeleteStorageAttachment deletes the storage attachment with the given name. 
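+//
+// A hypothetical usage sketch (names assumed, not part of this patch):
+//
+//	input := &DeleteStorageAttachmentInput{Name: attachmentName}
+//	if err := client.StorageAttachments().DeleteStorageAttachment(input); err != nil {
+//		// handle the error; the call also waits for the deletion to complete
+//	}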
+func (c *StorageAttachmentsClient) DeleteStorageAttachment(input *DeleteStorageAttachmentInput) error { + if err := c.deleteResource(input.Name); err != nil { + return err + } + + return c.waitForStorageAttachmentToBeDeleted(input.Name, WaitForVolumeAttachmentDeleteTimeout) +} + +// GetStorageAttachmentInput represents the body of an API request to obtain a Storage Attachment. +type GetStorageAttachmentInput struct { + // The three-part name of the Storage Attachment (/Compute-identity_domain/user/object). + // Required + Name string `json:"name"` +} + +// GetStorageAttachment retrieves the storage attachment with the given name. +func (c *StorageAttachmentsClient) GetStorageAttachment(input *GetStorageAttachmentInput) (*StorageAttachmentInfo, error) { + var attachmentInfo *StorageAttachmentInfo + if err := c.getResource(input.Name, &attachmentInfo); err != nil { + return nil, err + } + + return c.success(attachmentInfo) +} + +// waitForStorageAttachmentToFullyAttach waits for the storage attachment with the given name to be fully attached, or times out. +func (c *StorageAttachmentsClient) waitForStorageAttachmentToFullyAttach(name string, timeoutSeconds int) (*StorageAttachmentInfo, error) { + var waitResult *StorageAttachmentInfo + + err := c.waitFor("storage attachment to be attached", timeoutSeconds, func() (bool, error) { + input := &GetStorageAttachmentInput{ + Name: name, + } + info, err := c.GetStorageAttachment(input) + if err != nil { + return false, err + } + + if info != nil { + if info.State == Attached { + waitResult = info + return true, nil + } + } + + return false, nil + }) + + return waitResult, err +} + +// waitForStorageAttachmentToBeDeleted waits for the storage attachment with the given name to be fully deleted, or times out. +func (c *StorageAttachmentsClient) waitForStorageAttachmentToBeDeleted(name string, timeoutSeconds int) error { + return c.waitFor("storage attachment to be deleted", timeoutSeconds, func() (bool, error) { + input := &GetStorageAttachmentInput{ + Name: name, + } + _, err := c.GetStorageAttachment(input) + if err != nil { + if WasNotFoundError(err) { + return true, nil + } + return false, err + } + return false, nil + }) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go new file mode 100644 index 000000000..c40c354a9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go @@ -0,0 +1,345 @@ +package compute + +import ( + "fmt" + "strconv" + "strings" +) + +const WaitForVolumeReadyTimeout = 30 +const WaitForVolumeDeleteTimeout = 30 + +// StorageVolumeClient is a client for the Storage Volume functions of the Compute API. +type StorageVolumeClient struct { + ResourceClient +} + +// StorageVolumes obtains a StorageVolumeClient which can be used to access to the +// Storage Volume functions of the Compute API +func (c *Client) StorageVolumes() *StorageVolumeClient { + return &StorageVolumeClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: "storage volume", + ContainerPath: "/storage/volume/", + ResourceRootPath: "/storage/volume", + }} + +} + +type StorageVolumeKind string + +const ( + StorageVolumeKindDefault StorageVolumeKind = "/oracle/public/storage/default" + StorageVolumeKindLatency StorageVolumeKind = "/oracle/public/storage/latency" +) + +// StorageVolumeInfo represents information retrieved from the service about a Storage Volume. 
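+//
+// Note on units: Size is carried as a decimal string. CreateStorageVolume and
+// UpdateStorageVolume convert a size given in GB to a byte count before sending
+// the request (see sizeInBytes below), and success converts the byte count in
+// API responses back to GB (see sizeInGigaBytes).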
+type StorageVolumeInfo struct {
+ // Shows the default account for your identity domain.
+ Account string `json:"account,omitempty"`
+
+ // true indicates that the storage volume can also be used as a boot disk for an instance.
+ // If you set the value to true, then you must specify values for the `ImageList` and `ImageListEntry` fields.
+ Bootable bool `json:"bootable,omitempty"`
+
+ // The description of the storage volume.
+ Description string `json:"description,omitempty"`
+
+ // The hypervisor that this volume is compatible with.
+ Hypervisor string `json:"hypervisor,omitempty"`
+
+ // Name of machine image to extract onto this volume when created. This information is provided only for bootable storage volumes.
+ ImageList string `json:"imagelist,omitempty"`
+
+ // Specific imagelist entry version to extract.
+ ImageListEntry int `json:"imagelist_entry,omitempty"`
+
+ // Three-part name of the machine image. This information is available if the volume is a bootable storage volume.
+ MachineImage string `json:"machineimage_name,omitempty"`
+
+ // All volumes are managed volumes. Default value is true.
+ Managed bool `json:"managed,omitempty"`
+
+ // The three-part name of the object (/Compute-identity_domain/user/object).
+ Name string `json:"name"`
+
+ // The OS platform this volume is compatible with.
+ Platform string `json:"platform,omitempty"`
+
+ // The storage-pool property: /oracle/public/storage/latency or /oracle/public/storage/default.
+ Properties []string `json:"properties,omitempty"`
+
+ // Boolean field indicating whether this volume can be attached as readonly. If set to false the volume will be attached as read-write.
+ ReadOnly bool `json:"readonly,omitempty"`
+
+ // The size of this storage volume in GB.
+ Size string `json:"size"`
+
+ // Name of the parent snapshot from which the storage volume is restored or cloned.
+ Snapshot string `json:"snapshot,omitempty"`
+
+ // Account of the parent snapshot from which the storage volume is restored.
+ SnapshotAccount string `json:"snapshot_account,omitempty"`
+
+ // Id of the parent snapshot from which the storage volume is restored or cloned.
+ SnapshotID string `json:"snapshot_id,omitempty"`
+
+ // TODO: this should become a Constant, if/when we have the values
+ // The current state of the storage volume.
+ Status string `json:"status,omitempty"`
+
+ // Details about the latest state of the storage volume.
+ StatusDetail string `json:"status_detail,omitempty"`
+
+ // The time at which the current view of the storage volume was generated.
+ StatusTimestamp string `json:"status_timestamp,omitempty"`
+
+ // The storage pool from which this volume is allocated.
+ StoragePool string `json:"storage_pool,omitempty"`
+
+ // Comma-separated strings that tag the storage volume.
+ Tags []string `json:"tags,omitempty"`
+
+ // Uniform Resource Identifier
+ URI string `json:"uri,omitempty"`
+}
+
+func (c *StorageVolumeClient) getStorageVolumePath(name string) string {
+ return c.getObjectPath("/storage/volume", name)
+}
+
+// CreateStorageVolumeInput represents the body of an API request to create a new Storage Volume.
+type CreateStorageVolumeInput struct {
+ // true indicates that the storage volume can also be used as a boot disk for an instance.
+ // If you set the value to true, then you must specify values for the `ImageList` and `ImageListEntry` fields.
+ Bootable bool `json:"bootable,omitempty"`
+
+ // The description of the storage volume.
+ Description string `json:"description,omitempty"`
+
+ // Name of machine image to extract onto this volume when created. This information is provided only for bootable storage volumes.
+ ImageList string `json:"imagelist,omitempty"`
+
+ // Specific imagelist entry version to extract.
+ ImageListEntry int `json:"imagelist_entry,omitempty"`
+
+ // The three-part name of the object (/Compute-identity_domain/user/object).
+ Name string `json:"name"`
+
+ // The storage-pool property: /oracle/public/storage/latency or /oracle/public/storage/default.
+ Properties []string `json:"properties,omitempty"`
+
+ // The size of this storage volume in GB.
+ Size string `json:"size"`
+
+ // Name of the parent snapshot from which the storage volume is restored or cloned.
+ Snapshot string `json:"snapshot,omitempty"`
+
+ // Account of the parent snapshot from which the storage volume is restored.
+ SnapshotAccount string `json:"snapshot_account,omitempty"`
+
+ // Id of the parent snapshot from which the storage volume is restored or cloned.
+ SnapshotID string `json:"snapshot_id,omitempty"`
+
+ // Comma-separated strings that tag the storage volume.
+ Tags []string `json:"tags,omitempty"`
+}
+
+// CreateStorageVolume uses the given CreateStorageVolumeInput to create a new Storage Volume.
+func (c *StorageVolumeClient) CreateStorageVolume(input *CreateStorageVolumeInput) (*StorageVolumeInfo, error) {
+ input.Name = c.getQualifiedName(input.Name)
+ input.ImageList = c.getQualifiedName(input.ImageList)
+
+ sizeInBytes, err := sizeInBytes(input.Size)
+ if err != nil {
+ return nil, err
+ }
+ input.Size = sizeInBytes
+
+ var storageInfo StorageVolumeInfo
+ if err := c.createResource(&input, &storageInfo); err != nil {
+ return nil, err
+ }
+
+ return c.waitForStorageVolumeToBecomeAvailable(input.Name, WaitForVolumeReadyTimeout)
+}
+
+// DeleteStorageVolumeInput represents the body of an API request to delete a Storage Volume.
+type DeleteStorageVolumeInput struct {
+ // The three-part name of the object (/Compute-identity_domain/user/object).
+ Name string `json:"name"`
+}
+
+// DeleteStorageVolume deletes the specified storage volume.
+func (c *StorageVolumeClient) DeleteStorageVolume(input *DeleteStorageVolumeInput) error {
+ if err := c.deleteResource(input.Name); err != nil {
+ return err
+ }
+
+ return c.waitForStorageVolumeToBeDeleted(input.Name, WaitForVolumeDeleteTimeout)
+}
+
+// GetStorageVolumeInput represents the body of an API request to obtain a Storage Volume.
+type GetStorageVolumeInput struct {
+ // The three-part name of the object (/Compute-identity_domain/user/object).
+ Name string `json:"name"`
+}
+
+func (c *StorageVolumeClient) success(result *StorageVolumeInfo) (*StorageVolumeInfo, error) {
+ c.unqualify(&result.Name)
+
+ // The API reports size as a byte count; convert it back to GB for callers.
+ sizeInGB, err := sizeInGigaBytes(result.Size)
+ if err != nil {
+ return nil, err
+ }
+ result.Size = sizeInGB
+
+ return result, nil
+}
+
+// GetStorageVolume gets Storage Volume information for the specified storage volume.
+func (c *StorageVolumeClient) GetStorageVolume(input *GetStorageVolumeInput) (*StorageVolumeInfo, error) {
+ var storageVolume StorageVolumeInfo
+ if err := c.getResource(input.Name, &storageVolume); err != nil {
+ if WasNotFoundError(err) {
+ return nil, nil
+ }
+
+ return nil, err
+ }
+
+ return c.success(&storageVolume)
+}
+
+// UpdateStorageVolumeInput represents the body of an API request to update a Storage Volume.
+type UpdateStorageVolumeInput struct {
+ // The description of the storage volume.
+ Description string `json:"description,omitempty"` + + // Name of machine image to extract onto this volume when created. This information is provided only for bootable storage volumes. + ImageList string `json:"imagelist,omitempty"` + + // Specific imagelist entry version to extract. + ImageListEntry int `json:"imagelist_entry,omitempty"` + + // The three-part name of the object (/Compute-identity_domain/user/object). + Name string `json:"name"` + + // The storage-pool property: /oracle/public/storage/latency or /oracle/public/storage/default. + Properties []string `json:"properties,omitempty"` + + // The size of this storage volume in GB. + Size string `json:"size"` + + // Name of the parent snapshot from which the storage volume is restored or cloned. + Snapshot string `json:"snapshot,omitempty"` + + // Account of the parent snapshot from which the storage volume is restored. + SnapshotAccount string `json:"snapshot_account,omitempty"` + + // Id of the parent snapshot from which the storage volume is restored or cloned. + SnapshotID string `json:"snapshot_id,omitempty"` + + // Comma-separated strings that tag the storage volume. + Tags []string `json:"tags,omitempty"` +} + +// UpdateStorageVolume updates the specified storage volume, optionally modifying size, description and tags. +func (c *StorageVolumeClient) UpdateStorageVolume(input *UpdateStorageVolumeInput) (*StorageVolumeInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.ImageList = c.getQualifiedName(input.ImageList) + + sizeInBytes, err := sizeInBytes(input.Size) + if err != nil { + return nil, err + } + input.Size = sizeInBytes + + path := c.getStorageVolumePath(input.Name) + _, err = c.executeRequest("PUT", path, input) + if err != nil { + return nil, err + } + + instanceInfo, err := c.waitForStorageVolumeToBecomeAvailable(input.Name, WaitForVolumeReadyTimeout) + if err != nil { + return nil, err + } + + return instanceInfo, nil +} + +// waitForStorageVolumeToBecomeAvailable waits until a new Storage Volume is available (i.e. has finished initialising or updating). +func (c *StorageVolumeClient) waitForStorageVolumeToBecomeAvailable(name string, timeoutInSeconds int) (*StorageVolumeInfo, error) { + var waitResult *StorageVolumeInfo + + err := c.waitFor( + fmt.Sprintf("storage volume %s to become available", c.getQualifiedName(name)), + timeoutInSeconds, + func() (bool, error) { + getRequest := &GetStorageVolumeInput{ + Name: name, + } + result, err := c.GetStorageVolume(getRequest) + + if err != nil { + return false, err + } + + if result != nil { + waitResult = result + if strings.ToLower(waitResult.Status) == "online" { + return true, nil + } + } + + return false, nil + }) + + return waitResult, err +} + +// waitForStorageVolumeToBeDeleted waits until the specified storage volume has been deleted. 
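+//
+// It polls GetStorageVolume, which returns nil, nil once the API reports the
+// volume as not found; that result is treated as successful deletion, while any
+// other error aborts the wait.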
+func (c *StorageVolumeClient) waitForStorageVolumeToBeDeleted(name string, timeoutInSeconds int) error {
+ return c.waitFor(
+ fmt.Sprintf("storage volume %s to be deleted", c.getQualifiedName(name)),
+ timeoutInSeconds,
+ func() (bool, error) {
+ getRequest := &GetStorageVolumeInput{
+ Name: name,
+ }
+ result, err := c.GetStorageVolume(getRequest)
+ if err != nil {
+ return false, err
+ }
+
+ // GetStorageVolume returns nil, nil once the volume is no longer found.
+ return result == nil, nil
+ })
+}
+
+// sizeInGigaBytes converts a byte count (as a decimal string) to whole gigabytes.
+func sizeInGigaBytes(input string) (string, error) {
+ sizeInBytes, err := strconv.Atoi(input)
+ if err != nil {
+ return "", err
+ }
+ sizeInKB := sizeInBytes / 1024
+ sizeInMB := sizeInKB / 1024
+ sizeInGB := sizeInMB / 1024
+ return strconv.Itoa(sizeInGB), nil
+}
+
+// sizeInBytes converts a size in gigabytes (as a decimal string) to a byte count.
+func sizeInBytes(input string) (string, error) {
+ sizeInGB, err := strconv.Atoi(input)
+ if err != nil {
+ return "", err
+ }
+ sizeInMB := sizeInGB * 1024
+ sizeInKB := sizeInMB * 1024
+ sizeInBytes := sizeInKB * 1024
+ return strconv.Itoa(sizeInBytes), nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/test_utils.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/test_utils.go
new file mode 100644
index 000000000..82b6047ff
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/test_utils.go
@@ -0,0 +1,121 @@
+package compute
+
+import (
+ "bytes"
+ "encoding/json"
+ "log"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-oracle-terraform/opc"
+)
+
+const (
+ _ClientTestUser = "test-user"
+ _ClientTestDomain = "test-domain"
+)
+
+func newAuthenticatingServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if os.Getenv("ORACLE_LOG") != "" {
+ log.Printf("[DEBUG] Received request: %s, %s\n", r.Method, r.URL)
+ }
+
+ if r.URL.Path == "/authenticate/" {
+ http.SetCookie(w, &http.Cookie{Name: "testAuthCookie", Value: "cookie value"})
+ // w.WriteHeader(200)
+ } else {
+ handler(w, r)
+ }
+ }))
+}
+
+func getTestClient(c *opc.Config) (*Client, error) {
+ // Build up config with default values if omitted
+ if c.APIEndpoint == nil {
+ if os.Getenv("OPC_ENDPOINT") == "" {
+ panic("OPC_ENDPOINT not set in environment")
+ }
+ endpoint, err := url.Parse(os.Getenv("OPC_ENDPOINT"))
+ if err != nil {
+ return nil, err
+ }
+ c.APIEndpoint = endpoint
+ }
+
+ if c.IdentityDomain == nil {
+ domain := os.Getenv("OPC_IDENTITY_DOMAIN")
+ c.IdentityDomain = &domain
+ }
+
+ if c.Username == nil {
+ username := os.Getenv("OPC_USERNAME")
+ c.Username = &username
+ }
+
+ if c.Password == nil {
+ password := os.Getenv("OPC_PASSWORD")
+ c.Password = &password
+ }
+
+ if c.HTTPClient == nil {
+ c.HTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSHandshakeTimeout: 120 * time.Second},
+ }
+ }
+
+ return NewComputeClient(c)
+}
+
+func getBlankTestClient() (*Client, *httptest.Server, error) {
+ server := newAuthenticatingServer(func(w http.ResponseWriter, r *http.Request) {
+ })
+
+ endpoint, err := url.Parse(server.URL)
+ if err != nil {
+ server.Close()
+ return nil, nil, err
+ }
+
+ client, err := getTestClient(&opc.Config{
+ IdentityDomain: opc.String(_ClientTestDomain),
+ Username: opc.String(_ClientTestUser),
+ APIEndpoint: endpoint,
+ })
+ if err != nil {
+ server.Close()
+ return nil, nil, err
+ }
+ return client, server, nil
+}
+
+// getStubClient returns a stub client with default values, and a custom API endpoint.
+func getStubClient(endpoint *url.URL) (*Client, error) {
+ domain := "test"
+ username := "test"
+ password := "test"
+ config := &opc.Config{
+ IdentityDomain: &domain,
+ Username: &username,
+ Password: &password,
+ APIEndpoint: endpoint,
+ }
+ return getTestClient(config)
+}
+
+func unmarshalRequestBody(t *testing.T, r *http.Request, target interface{}) {
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(r.Body)
+ err := json.Unmarshal(buf.Bytes(), target)
+ if err != nil {
+ t.Fatalf("Error unmarshalling request: %s", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic.go
new file mode 100644
index 000000000..825bb8fe8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic.go
@@ -0,0 +1,52 @@
+package compute
+
+// VirtNICsClient is a client for the Virtual NIC functions of the Compute API.
+type VirtNICsClient struct {
+ ResourceClient
+}
+
+// VirtNICs obtains a VirtNICsClient which can be used to access the
+// Virtual NIC functions of the Compute API.
+func (c *Client) VirtNICs() *VirtNICsClient {
+ return &VirtNICsClient{
+ ResourceClient: ResourceClient{
+ Client: c,
+ ResourceDescription: "Virtual NIC",
+ ContainerPath: "/network/v1/vnic/",
+ ResourceRootPath: "/network/v1/vnic",
+ },
+ }
+}
+
+// VirtualNIC describes an existing virtual NIC.
+type VirtualNIC struct {
+ // Description of the object.
+ Description string `json:"description"`
+ // MAC address of this VNIC.
+ MACAddress string `json:"macAddress"`
+ // The three-part name (/Compute-identity_domain/user/object) of the Virtual NIC.
+ Name string `json:"name"`
+ // Tags associated with the object.
+ Tags []string `json:"tags"`
+ // True if the VNIC is of type "transit".
+ TransitFlag bool `json:"transitFlag"`
+ // Uniform Resource Identifier
+ Uri string `json:"uri"`
+}
+
+// GetVirtualNICInput specifies the virtual NIC to fetch. Virtual NICs can only be
+// read; they cannot be created, updated, or deleted through this client.
+type GetVirtualNICInput struct {
+ // The three-part name (/Compute-identity_domain/user/object) of the Virtual NIC.
+ // Required
+ Name string `json:"name"`
+}
+
+// GetVirtualNIC retrieves the virtual NIC with the given name.
+func (c *VirtNICsClient) GetVirtualNIC(input *GetVirtualNICInput) (*VirtualNIC, error) {
+ var virtNIC VirtualNIC
+ input.Name = c.getQualifiedName(input.Name)
+ if err := c.getResource(input.Name, &virtNIC); err != nil {
+ return nil, err
+ }
+ return c.success(&virtNIC)
+}
+
+func (c *VirtNICsClient) success(info *VirtualNIC) (*VirtualNIC, error) {
+ c.unqualify(&info.Name)
+ return info, nil
+}
diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic_sets.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic_sets.go
new file mode 100644
index 000000000..ef9035f26
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/virtual_nic_sets.go
@@ -0,0 +1,154 @@
+package compute
+
+// VirtNICSetsClient is a client for the Virtual NIC Set functions of the Compute API.
+type VirtNICSetsClient struct {
+ ResourceClient
+}
+
+// VirtNICSets obtains a VirtNICSetsClient which can be used to access the
+// Virtual NIC Set functions of the Compute API.
+func (c *Client) VirtNICSets() *VirtNICSetsClient {
+ return &VirtNICSetsClient{
+ ResourceClient: ResourceClient{
+ Client: c,
+ ResourceDescription: "Virtual NIC Set",
+ ContainerPath: "/network/v1/vnicset/",
+ ResourceRootPath: "/network/v1/vnicset",
+ },
+ }
+}
+
+// VirtualNICSet describes an existing virtual NIC set.
+type VirtualNICSet struct {
+ // List of ACLs applied to the VNICs in the set.
+ AppliedACLs []string `json:"appliedAcls"`
+ // Description of the VNIC Set.
+ Description string `json:"description"`
+ // The three-part name (/Compute-identity_domain/user/object) of the VNIC set.
+ Name string `json:"name"`
+ // Tags associated with the VNIC set.
+ Tags []string `json:"tags"`
+ // Uniform Resource Identifier
+ Uri string `json:"uri"`
+ // List of VNICs associated with this VNIC set.
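+ // For example: []string{"/Compute-acme/jack/my-instance/some-vnic"} (an illustrative, hypothetical value).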
+ VirtualNICs []string `json:"vnics"` +} + +type CreateVirtualNICSetInput struct { + // List of ACLs applied to the VNICs in the set. + // Optional + AppliedACLs []string `json:"appliedAcls"` + // Description of the object. + // Optional + Description string `json:"description"` + // The three-part name (/Compute-identity_domain/user/object) of the virtual NIC set. + // Object names can contain only alphanumeric, underscore (_), dash (-), and period (.) characters. Object names are case-sensitive. + // Required + Name string `json:"name"` + // Tags associated with this VNIC set. + // Optional + Tags []string `json:"tags"` + // List of VNICs associated with this VNIC set. + // Optional + VirtualNICs []string `json:"vnics"` +} + +func (c *VirtNICSetsClient) CreateVirtualNICSet(input *CreateVirtualNICSetInput) (*VirtualNICSet, error) { + input.Name = c.getQualifiedName(input.Name) + input.AppliedACLs = c.getQualifiedAcls(input.AppliedACLs) + qualifiedNics := c.getQualifiedList(input.VirtualNICs) + if len(qualifiedNics) != 0 { + input.VirtualNICs = qualifiedNics + } + + var virtNicSet VirtualNICSet + if err := c.createResource(input, &virtNicSet); err != nil { + return nil, err + } + + return c.success(&virtNicSet) +} + +type GetVirtualNICSetInput struct { + // The three-part name (/Compute-identity_domain/user/object) of the virtual NIC set. + // Required + Name string `json:"name"` +} + +func (c *VirtNICSetsClient) GetVirtualNICSet(input *GetVirtualNICSetInput) (*VirtualNICSet, error) { + var virtNicSet VirtualNICSet + // Qualify Name + input.Name = c.getQualifiedName(input.Name) + if err := c.getResource(input.Name, &virtNicSet); err != nil { + return nil, err + } + + return c.success(&virtNicSet) +} + +type UpdateVirtualNICSetInput struct { + // List of ACLs applied to the VNICs in the set. + // Optional + AppliedACLs []string `json:"appliedAcls"` + // Description of the object. + // Optional + Description string `json:"description"` + // The three-part name (/Compute-identity_domain/user/object) of the virtual NIC set. + // Object names can contain only alphanumeric, underscore (_), dash (-), and period (.) characters. Object names are case-sensitive. + // Required + Name string `json:"name"` + // Tags associated with this VNIC set. + // Optional + Tags []string `json:"tags"` + // List of VNICs associated with this VNIC set. + // Optional + VirtualNICs []string `json:"vnics"` +} + +func (c *VirtNICSetsClient) UpdateVirtualNICSet(input *UpdateVirtualNICSetInput) (*VirtualNICSet, error) { + input.Name = c.getQualifiedName(input.Name) + input.AppliedACLs = c.getQualifiedAcls(input.AppliedACLs) + // Qualify VirtualNICs + qualifiedVNICs := c.getQualifiedList(input.VirtualNICs) + if len(qualifiedVNICs) != 0 { + input.VirtualNICs = qualifiedVNICs + } + + var virtNICSet VirtualNICSet + if err := c.updateResource(input.Name, input, &virtNICSet); err != nil { + return nil, err + } + + return c.success(&virtNICSet) +} + +type DeleteVirtualNICSetInput struct { + // The name of the virtual NIC set. 
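+ // The name may be given unqualified; DeleteVirtualNICSet qualifies it with getQualifiedName before issuing the request.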
+ // Required + Name string `json:"name"` +} + +func (c *VirtNICSetsClient) DeleteVirtualNICSet(input *DeleteVirtualNICSetInput) error { + input.Name = c.getQualifiedName(input.Name) + return c.deleteResource(input.Name) +} + +func (c *VirtNICSetsClient) getQualifiedAcls(acls []string) []string { + qualifiedAcls := []string{} + for _, acl := range acls { + qualifiedAcls = append(qualifiedAcls, c.getQualifiedName(acl)) + } + return qualifiedAcls +} + +func (c *VirtNICSetsClient) unqualifyAcls(acls []string) []string { + unqualifiedAcls := []string{} + for _, acl := range acls { + unqualifiedAcls = append(unqualifiedAcls, c.getUnqualifiedName(acl)) + } + return unqualifiedAcls +} + +func (c *VirtNICSetsClient) success(info *VirtualNICSet) (*VirtualNICSet, error) { + c.unqualify(&info.Name) + info.AppliedACLs = c.unqualifyAcls(info.AppliedACLs) + info.VirtualNICs = c.getUnqualifiedList(info.VirtualNICs) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/helper/testing.go b/vendor/github.com/hashicorp/go-oracle-terraform/helper/testing.go new file mode 100644 index 000000000..eccbdeeb2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/helper/testing.go @@ -0,0 +1,44 @@ +package helper + +import ( + "fmt" + "log" + "math/rand" + "os" + "time" + + "github.com/hashicorp/go-oracle-terraform/opc" +) + +const TestEnvVar = "ORACLE_ACC" + +// Test suite helpers + +type TestCase struct { + // Fields to test stuff with +} + +func Test(t TestT, c TestCase) { + if os.Getenv(TestEnvVar) == "" { + t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' is set", TestEnvVar)) + return + } + + // Setup logging Output + logWriter, err := opc.LogOutput() + if err != nil { + t.Error(fmt.Sprintf("Error setting up log writer: %s", err)) + } + log.SetOutput(logWriter) +} + +type TestT interface { + Error(args ...interface{}) + Fatal(args ...interface{}) + Skip(args ...interface{}) +} + +func RInt() int { + rand.Seed(time.Now().UTC().UnixNano()) + return rand.Int() +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/opc/config.go b/vendor/github.com/hashicorp/go-oracle-terraform/opc/config.go new file mode 100644 index 000000000..0ac83ec88 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/opc/config.go @@ -0,0 +1,21 @@ +package opc + +import ( + "net/http" + "net/url" +) + +type Config struct { + Username *string + Password *string + IdentityDomain *string + APIEndpoint *url.URL + MaxRetries *int + LogLevel LogLevelType + Logger Logger + HTTPClient *http.Client +} + +func NewConfig() *Config { + return &Config{} +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/opc/convert.go b/vendor/github.com/hashicorp/go-oracle-terraform/opc/convert.go new file mode 100644 index 000000000..3fa365c1c --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/opc/convert.go @@ -0,0 +1,5 @@ +package opc + +func String(v string) *string { + return &v +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/opc/errors.go b/vendor/github.com/hashicorp/go-oracle-terraform/opc/errors.go new file mode 100644 index 000000000..6b12c10d9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/opc/errors.go @@ -0,0 +1,12 @@ +package opc + +import "fmt" + +type OracleError struct { + StatusCode int + Message string +} + +func (e OracleError) Error() string { + return fmt.Sprintf("%d: %s", e.StatusCode, e.Message) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/opc/logger.go 
b/vendor/github.com/hashicorp/go-oracle-terraform/opc/logger.go new file mode 100644 index 000000000..f9714a7a8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/opc/logger.go @@ -0,0 +1,70 @@ +package opc + +import ( + "io" + "io/ioutil" + "log" + "os" +) + +const ( + LogOff LogLevelType = 0 + LogDebug LogLevelType = 1 +) + +type LogLevelType uint + +// Logger interface. Should be satisfied by Terraform's logger as well as the Default logger +type Logger interface { + Log(...interface{}) +} + +type LoggerFunc func(...interface{}) + +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + +// Returns a default logger if one isn't specified during configuration +func NewDefaultLogger() Logger { + logWriter, err := LogOutput() + if err != nil { + log.Fatalf("Error setting up log writer: %s", err) + } + return &defaultLogger{ + logger: log.New(logWriter, "", log.LstdFlags), + } +} + +// Default logger to satisfy the logger interface +type defaultLogger struct { + logger *log.Logger +} + +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} + +func LogOutput() (logOutput io.Writer, err error) { + // Default to nil + logOutput = ioutil.Discard + + logLevel := LogLevel() + if logLevel == LogOff { + return + } + + // Logging is on, set output to STDERR + logOutput = os.Stderr + return +} + +// Gets current Log Level from the ORACLE_LOG env var +func LogLevel() LogLevelType { + envLevel := os.Getenv("ORACLE_LOG") + if envLevel == "" { + return LogOff + } else { + return LogDebug + } +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 1d3e40282..df4eca514 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1969,6 +1969,24 @@ "path": "github.com/hashicorp/go-multierror", "revision": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" }, + { + "checksumSHA1": "Sqz9+8frdOIkyK/v4IjjInZAp4Y=", + "path": "github.com/hashicorp/go-oracle-terraform/compute", + "revision": "98fdaf3c4bde245e21947487ba722c3d0abaccb2", + "revisionTime": "2017-03-29T21:19:34Z" + }, + { + "checksumSHA1": "DzK7lYwHt5Isq5Zf73cnQqBO2LI=", + "path": "github.com/hashicorp/go-oracle-terraform/helper", + "revision": "98fdaf3c4bde245e21947487ba722c3d0abaccb2", + "revisionTime": "2017-03-29T21:19:34Z" + }, + { + "checksumSHA1": "AyNRs19Es9pDw2VMxVKWuLx3Afg=", + "path": "github.com/hashicorp/go-oracle-terraform/opc", + "revision": "98fdaf3c4bde245e21947487ba722c3d0abaccb2", + "revisionTime": "2017-03-29T21:19:34Z" + }, { "checksumSHA1": "b0nQutPMJHeUmz4SjpreotAo6Yk=", "path": "github.com/hashicorp/go-plugin", diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss index 9f2922c21..16c6ae042 100755 --- a/website/source/assets/stylesheets/_docs.scss +++ b/website/source/assets/stylesheets/_docs.scss @@ -50,7 +50,7 @@ body.layout-nomad, body.layout-ns1, body.layout-openstack, body.layout-opsgenie, -body.layout-oracleopc, +body.layout-opc, body.layout-packet, body.layout-pagerduty, body.layout-postgresql, diff --git a/website/source/docs/providers/opc/d/opc_compute_network_interface.html.markdown b/website/source/docs/providers/opc/d/opc_compute_network_interface.html.markdown new file mode 100644 index 000000000..b479baeba --- /dev/null +++ b/website/source/docs/providers/opc/d/opc_compute_network_interface.html.markdown @@ -0,0 +1,49 @@ +--- +layout: "opc" +page_title: "Oracle: opc_compute_network_interface" +sidebar_current: "docs-opc-datasource-network-interface" +description: |- + Gets information about the configuration of an 
instance's network interface.
+---
+
+# opc\_compute\_network\_interface
+
+Use this data source to access the configuration of an instance's network interface.
+
+## Example Usage
+
+```
+data "opc_compute_network_interface" "foo" {
+  instance_id   = "${opc_compute_instance.my_instance.id}"
+  instance_name = "${opc_compute_instance.my_instance.name}"
+  interface     = "eth0"
+}
+
+output "mac_address" {
+  value = "${data.opc_compute_network_interface.foo.mac_address}"
+}
+
+output "vnic" {
+  value = "${data.opc_compute_network_interface.foo.vnic}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `instance_name` - (Required) The name of the instance.
+* `instance_id` - (Required) The id of the instance.
+* `interface` - (Required) The name of the attached interface: `eth0`, `eth1`, ..., `eth9`.
+
+## Attributes Reference
+
+* `dns` - Array of DNS servers for the interface.
+* `ip_address` - IP Address assigned to the interface.
+* `ip_network` - The IP Network assigned to the interface.
+* `mac_address` - The MAC address of the interface.
+* `model` - The model of the NIC card used.
+* `name_servers` - Array of name servers for the interface.
+* `nat` - The IP Reservation (in IP Networks) associated with the interface.
+* `search_domains` - The search domains that are sent through DHCP as option 119.
+* `sec_lists` - The security lists the interface is added to.
+* `shared_network` - Whether the interface is in the Shared Network or an IP Network.
+* `vnic` - The name of the vNIC created for the IP Network.
+* `vnic_sets` - The array of vNIC Sets the interface was added to.
diff --git a/website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown b/website/source/docs/providers/opc/d/opc_compute_vnic.html.markdown
similarity index 85%
rename from website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown
rename to website/source/docs/providers/opc/d/opc_compute_vnic.html.markdown
index 78be49c4a..8656c85c2 100644
--- a/website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown
+++ b/website/source/docs/providers/opc/d/opc_compute_vnic.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracleopc"
+layout: "opc"
 page_title: "Oracle: opc_compute_vnic"
-sidebar_current: "docs-oracleopc-datasource-vnic"
+sidebar_current: "docs-opc-datasource-vnic"
 description: |-
   Gets information about the configuration of a Virtual NIC.
 ---
@@ -13,7 +13,9 @@ Use this data source to access the configuration of a Virtual NIC.
 ## Example Usage
 
 ```
-data "opc_compute_vnic" "current" {}
+data "opc_compute_vnic" "current" {
+  name = "my_vnic_name"
+}
 
 output "mac_address" {
   value = "${data.opc_compute_vnic.current.mac_address}"
diff --git a/website/source/docs/providers/oracleopc/index.html.markdown b/website/source/docs/providers/opc/index.html.markdown
similarity index 96%
rename from website/source/docs/providers/oracleopc/index.html.markdown
rename to website/source/docs/providers/opc/index.html.markdown
index 598346919..1086f310a 100644
--- a/website/source/docs/providers/oracleopc/index.html.markdown
+++ b/website/source/docs/providers/opc/index.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracleopc"
+layout: "opc"
 page_title: "Provider: Oracle Public Cloud"
-sidebar_current: "docs-oracleopc-index"
+sidebar_current: "docs-opc-index"
 description: |-
   The Oracle Public Cloud provider is used to interact with the many resources supported by the Oracle Public Cloud. The provider needs to be configured with credentials for the Oracle Public Cloud API.
 ---
@@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources.
 
 ```
 # Configure the Oracle Public Cloud
-provider "oracle" {
+provider "opc" {
   user = "..."
   password = "..."
   identity_domain = "..."
diff --git a/website/source/docs/providers/opc/r/opc_compute_acl.html.markdown b/website/source/docs/providers/opc/r/opc_compute_acl.html.markdown
new file mode 100644
index 000000000..6f26c777b
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_acl.html.markdown
@@ -0,0 +1,45 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_acl"
+sidebar_current: "docs-opc-resource-acl"
+description: |-
+  Creates and manages an ACL in an OPC identity domain.
+---
+
+# opc\_compute\_acl
+
+The ``opc_compute_acl`` resource creates and manages an ACL in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_acl" "default" {
+  name        = "ACL1"
+  description = "This is a description for an acl"
+  tags        = ["tag1", "tag2"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the ACL.
+
+* `enabled` - (Optional) Enables or disables the ACL. Defaults to `true`.
+
+* `description` - (Optional) A description of the ACL.
+
+* `tags` - (Optional) List of tags that may be applied to the ACL.
+
+In addition to the above, the following values are exported:
+
+* `uri` - The Uniform Resource Identifier for the ACL.
+
+## Import
+
+ACLs can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_acl.acl1 example
+```
diff --git a/website/source/docs/providers/opc/r/opc_compute_image_list.html.markdown b/website/source/docs/providers/opc/r/opc_compute_image_list.html.markdown
new file mode 100644
index 000000000..f5899520b
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_image_list.html.markdown
@@ -0,0 +1,39 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_image_list"
+sidebar_current: "docs-opc-resource-image-list"
+description: |-
+  Creates and manages an Image List in an OPC identity domain.
+---
+
+# opc\_compute\_image\_list
+
+The ``opc_compute_image_list`` resource creates and manages an Image List in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_image_list" "test" {
+  name        = "imagelist1"
+  description = "This is a description of the Image List"
+  default     = 21
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the Image List.
+
+* `description` - (Required) A description of the Image List.
+
+* `default` - (Optional) The image list entry to be used, by default, when launching instances using this image list. Defaults to `1`.
+
+## Import
+
+Image Lists can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_image_list.imagelist1 example
+```
diff --git a/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown b/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown
new file mode 100644
index 000000000..56b6240c4
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown
@@ -0,0 +1,169 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_instance"
+sidebar_current: "docs-opc-resource-instance"
+description: |-
+  Creates and manages an instance in an OPC identity domain.
+---
+
+# opc\_compute\_instance
+
+The ``opc_compute_instance`` resource creates and manages an instance in an OPC identity domain.
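+
+As a minimal sketch, an instance resource can be guarded with Terraform's
+standard `lifecycle` settings, which the caution below recommends; the
+resource name and argument values here are illustrative placeholders only:
+
+```
+resource "opc_compute_instance" "guarded" {
+  name  = "guarded-instance"
+  label = "guarded"
+  shape = "oc3"
+
+  # Core Terraform lifecycle setting: any plan that would destroy this
+  # instance is rejected with an error.
+  lifecycle {
+    prevent_destroy = true
+  }
+}
+```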
+
+~> **Caution:** The ``opc_compute_instance`` resource can completely delete your
+instance just as easily as it can create it. To avoid costly accidents,
+consider setting
+[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy)
+on your instance resources as an extra safety measure.
+
+## Example Usage
+
+```
+resource "opc_compute_instance" "test_instance" {
+  name = "test"
+  label = "test"
+  shape = "oc3"
+  image_list = "/oracle/public/oel_6.4_2GB_v1"
+  ssh_keys = ["${opc_compute_ssh_key.key1.name}"]
+  networking_info {
+    index = 0
+    model = "e1000"
+    nat = ["ippool:/oracle/public/ippool"]
+    shared_network = true
+  }
+  networking_info {
+    index = 1
+    ip_network = "${opc_compute_ip_network.foo.id}"
+    vnic = "testing-vnic-name"
+    shared_network = false
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the instance.
+
+* `shape` - (Required) The shape of the instance, e.g. `oc4`.
+
+* `instance_attributes` - (Optional) A JSON string of custom attributes. See [Attributes](#attributes) below for more information.
+
+* `boot_order` - (Optional) The index number of the bootable storage volume, presented as a list, that should be used to boot the instance. The only valid value is `[1]`. If you set this attribute, you must also specify a bootable storage volume with index number 1 in the `volume` sub-parameter of `storage_attachments`. When you specify `boot_order`, you don't need to specify the `image_list` attribute, because the instance is booted using the image on the specified bootable storage volume. If you specify both `boot_order` and `image_list`, the `image_list` attribute is ignored.
+
+* `hostname` - (Optional) The host name assigned to the instance. On an Oracle Linux instance, this host name is displayed in response to the hostname command. Only relative DNS is supported. The domain name is suffixed to the host name that you specify. The host name must not end with a period. If you don't specify a host name, then a name is generated automatically.
+
+* `image_list` - (Optional) The image list the instance is launched from, e.g. `/oracle/public/oel_6.4_2GB_v1`.
+
+* `label` - (Optional) The label to apply to the instance.
+
+* `networking_info` - (Optional) Information pertaining to an individual network interface to be created and attached to the instance. See [Networking Info](#networking-info) below for more information.
+
+* `reverse_dns` - (Optional) If set to `true` (default), then reverse DNS records are created. If set to `false`, no reverse DNS records are created.
+
+* `ssh_keys` - (Optional) A list of the names of the SSH Keys that can be used to log into the instance.
+
+* `tags` - (Optional) A list of strings that should be supplied to the instance as tags.
+
+## Attributes
+
+There are several custom attributes that a user may wish to make available to the instance during instance creation.
+These attributes can be specified via the `instance_attributes` field, and must be presented as a string in JSON format.
+The easiest way to populate this field is with a HEREDOC. A minimal sketch (the attribute keys and values are illustrative):
+
+```hcl
+resource "opc_compute_instance" "foo" {
+  name = "test"
+  label = "test"
+  shape = "oc3"
+  image_list = "/oracle/public/oel_6.4_2GB_v1"
+  instance_attributes = <<JSON
+{
+  "foo": "bar"
+}
+JSON
+}
+```
+
+## Import
+
+The instance can be imported as such:
+
+```
+terraform import opc_compute_instance.instance1 instance_name/instance_id
+```
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_address_prefix_set.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_address_prefix_set.html.markdown
new file mode 100644
index 000000000..3220644d5
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_ip_address_prefix_set.html.markdown
@@ -0,0 +1,45 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_ip_address_prefix_set"
+sidebar_current: "docs-opc-resource-ip-address-prefix-set"
+description: |-
+  Creates and manages an IP address prefix set in an OPC identity domain.
+---
+
+# opc\_compute\_ip\_address\_prefix\_set
+
+The ``opc_compute_ip_address_prefix_set`` resource creates and manages an IP address prefix set in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_ip_address_prefix_set" "default" {
+  name     = "PrefixSet1"
+  prefixes = ["192.168.0.0/16", "172.120.0.0/24"]
+  tags     = ["tags1", "tags2"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the ip address prefix set.
+
+* `prefixes` - (Optional) List of CIDR IPv4 prefixes assigned in the virtual network.
+
+* `description` - (Optional) A description of the ip address prefix set.
+
+* `tags` - (Optional) List of tags that may be applied to the ip address prefix set.
+
+In addition to the above, the following attributes are exported:
+
+* `uri` - (Computed) The Uniform Resource Identifier of the ip address prefix set.
+
+## Import
+
+IP Address Prefix Sets can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_ip_address_prefix_set.default example
+```
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown
new file mode 100644
index 000000000..00f0befd9
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown
@@ -0,0 +1,38 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_ip_address_reservation"
+sidebar_current: "docs-opc-resource-ip-address-reservation"
+description: |-
+  Creates and manages an IP address reservation in an OPC identity domain.
+---
+
+# opc\_compute\_ip\_address\_reservation
+
+The ``opc_compute_ip_address_reservation`` resource creates and manages an IP address reservation in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_ip_address_reservation" "default" {
+  name            = "IPAddressReservation1"
+  ip_address_pool = "public-ippool"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the ip address reservation.
+
+* `ip_address_pool` - (Required) The IP address pool from which you want to reserve an IP address.
+
+* `description` - (Optional) A description of the ip address reservation.
+
+* `tags` - (Optional) List of tags that may be applied to the IP address reservation.
+
+In addition to the above, the following attributes are exported:
+
+* `ip_address` - Reserved NAT IPv4 address from the IP address pool.
+
+* `uri` - The Uniform Resource Identifier of the ip address reservation.
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown
similarity index 75%
rename from website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
rename to website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown
index 2518b2df1..a148875ff 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown
+++ b/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracleopc"
+layout: "opc"
 page_title: "Oracle: opc_compute_ip_association"
-sidebar_current: "docs-oracleopc-resource-ip-association"
+sidebar_current: "docs-opc-resource-ip-association"
 description: |-
   Creates and manages an IP association in an OPC identity domain.
 ---
@@ -29,3 +29,18 @@ The following arguments are supported:
 * `parentpool` - (Required) The pool from which to take an IP address. To associate a specific reserved IP address,
 use the prefix `ipreservation:` followed by the name of the IP reservation. To allocate an IP address from a pool, use
 the prefix `ippool:`, e.g. `ippool:/oracle/public/ippool`.
+
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `name` - The name of the IP Association.
+
+## Import
+
+IP Associations can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_ip_association.association1 example
+```
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_network.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_network.html.markdown
new file mode 100644
index 000000000..6ce1977ba
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_ip_network.html.markdown
@@ -0,0 +1,54 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_ip_network"
+sidebar_current: "docs-opc-resource-ip-network"
+description: |-
+  Creates and manages an IP Network.
+---
+
+# opc\_compute\_ip\_network
+
+The ``opc_compute_ip_network`` resource creates and manages an IP Network.
+
+## Example Usage
+
+```
+resource "opc_compute_ip_network" "foo" {
+  name                = "my-ip-network"
+  description         = "my IP Network"
+  ip_address_prefix   = "10.0.1.0/24"
+  ip_network_exchange = "${opc_compute_ip_exchange.foo.name}"
+  public_napt_enabled = false
+  tags                = ["tag1", "tag2"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the IP Network.
+
+* `ip_address_prefix` - (Required) The IPv4 address prefix, in CIDR format.
+
+* `description` - (Optional) The description of the IP Network.
+
+* `ip_network_exchange` - (Optional) The IP Network Exchange to which the IP Network belongs.
+
+* `public_napt_enabled` - (Optional) If true, enable public internet access using NAPT for VNICs without any public IP Reservation. Defaults to `false`.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `name` - The name of the IP Network.
+
+* `ip_address_prefix` - The IPv4 address prefix, in CIDR format.
+
+* `description` - The description of the IP Network.
+
+* `ip_network_exchange` - The IP Network Exchange for the IP Network.
+
+* `public_napt_enabled` - Whether public internet access using NAPT is enabled for VNICs without a public IP Reservation.
+
+* `uri` - Uniform Resource Identifier for the IP Network.
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_network_exchange.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_network_exchange.html.markdown
new file mode 100644
index 000000000..604d95c37
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_ip_network_exchange.html.markdown
@@ -0,0 +1,37 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_ip_network_exchange"
+sidebar_current: "docs-opc-resource-ip-network-exchange"
+description: |-
+  Creates and manages an IP network exchange in an OPC identity domain.
+---
+
+# opc\_compute\_ip\_network\_exchange
+
+The ``opc_compute_ip_network_exchange`` resource creates and manages an IP network exchange in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_ip_network_exchange" "default" {
+  name = "NetworkExchange1"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the ip network exchange.
+
+* `description` - (Optional) A description of the ip network exchange.
+
+* `tags` - (Optional) List of tags that may be applied to the IP network exchange.
+
+## Import
+
+IP Network Exchanges can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_ip_network_exchange.exchange1 example
+```
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown
similarity index 70%
rename from website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown
rename to website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown
index 44b70cc0f..f937e03f9 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown
+++ b/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracleopc"
+layout: "opc"
 page_title: "Oracle: opc_compute_ip_reservation"
-sidebar_current: "docs-oracleopc-resource-ip-reservation"
+sidebar_current: "docs-opc-resource-ip-reservation"
 description: |-
   Creates and manages an IP reservation in an OPC identity domain.
 ---
@@ -14,7 +14,7 @@ The ``opc_compute_ip_reservation`` resource creates and manages an IP reservatio
 
 ```
 resource "opc_compute_ip_reservation" "reservation1" {
-	parentpool = "/oracle/public/ippool"
+	parent_pool = "/oracle/public/ippool"
 	permanent = true
 	tags = []
 }
@@ -24,10 +24,18 @@
 
 The following arguments are supported:
 
-* `parentpool` - (Required) The pool from which to allocate the IP address.
+* `parent_pool` - (Required) The pool from which to allocate the IP address.
 
 * `permanent` - (Required) Whether the IP address remains reserved even when it is no longer associated with an instance
 (if true), or may be returned to the pool and replaced with a different IP address when an instance is restarted, or
 deleted and recreated (if false).
 
 * `tags` - (Optional) List of tags that may be applied to the IP reservation.
+
+## Import
+
+IP Reservations can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_ip_reservation.reservation1 example
+```
diff --git a/website/source/docs/providers/opc/r/opc_compute_route.html.markdown b/website/source/docs/providers/opc/r/opc_compute_route.html.markdown
new file mode 100644
index 000000000..6fb86c860
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_route.html.markdown
@@ -0,0 +1,60 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_route"
+sidebar_current: "docs-opc-resource-route"
+description: |-
+  Creates and manages a Route resource for an IP Network.
+---
+
+# opc\_compute\_route
+
+The ``opc_compute_route`` resource creates and manages a route for an IP Network.
+
+## Example Usage
+
+```
+resource "opc_compute_route" "foo" {
+  name              = "my-route"
+  description       = "my IP Network route"
+  admin_distance    = 1
+  ip_address_prefix = "10.0.1.0/24"
+  next_hop_vnic_set = "${opc_compute_vnic_set.bar.name}"
+  tags              = ["tag1", "tag2"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the route.
+
+* `description` - (Optional) The description of the route.
+
+* `admin_distance` - (Optional) The route's administrative distance. Defaults to `0`.
+
+* `ip_address_prefix` - (Required) The IPv4 address prefix, in CIDR format, of the external network from which to route traffic.
+
+* `next_hop_vnic_set` - (Required) Name of the virtual NIC set to route matching packets to. Routed flows are load-balanced among all the virtual NICs in the virtual NIC set.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `name` - The name of the route.
+
+* `description` - The description of the route.
+
+* `admin_distance` - The route's administrative distance. Defaults to `0`.
+
+* `ip_address_prefix` - The IPv4 address prefix, in CIDR format, of the external network from which to route traffic.
+
+* `next_hop_vnic_set` - Name of the virtual NIC set to route matching packets to. Routed flows are load-balanced among all the virtual NICs in the virtual NIC set.
+
+## Import
+
+Routes can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_route.route1 example
+```
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown b/website/source/docs/providers/opc/r/opc_compute_sec_rule.html.markdown
similarity index 54%
rename from website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown
rename to website/source/docs/providers/opc/r/opc_compute_sec_rule.html.markdown
index 6497b0265..df222ee49 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown
+++ b/website/source/docs/providers/opc/r/opc_compute_sec_rule.html.markdown
@@ -1,27 +1,24 @@
 ---
-layout: "oracleopc"
-page_title: "Oracle: opc_compute_security_rule"
-sidebar_current: "docs-oracleopc-resource-security-rule"
+layout: "opc"
+page_title: "Oracle: opc_compute_sec_rule"
+sidebar_current: "docs-opc-resource-sec-rule"
 description: |-
-  Creates and manages a security rule in an OPC identity domain.
+  Creates and manages a sec rule in an OPC identity domain.
 ---
 
-# opc\_compute\_ip\_reservation
+# opc\_compute\_sec\_rule
 
-The ``opc_compute_security_rule`` resource creates and manages a security rule in an OPC identity domain, which joins
-together a source security list (or security IP list), a destination security list (or security IP list), and a security
-application.
+The ``opc_compute_sec_rule`` resource creates and manages a sec rule in an OPC identity domain, which joins together a source security list (or security IP list), a destination security list (or security IP list), and a security application.
 
 ## Example Usage
 
 ```
-resource "opc_compute_security_rule" "test_rule" {
+resource "opc_compute_sec_rule" "test_rule" {
 	name = "test"
 	source_list = "seclist:${opc_compute_security_list.sec-list1.name}"
 	destination_list = "seciplist:${opc_compute_security_ip_list.sec-ip-list1.name}"
 	action = "permit"
 	application = "${opc_compute_security_application.spring-boot.name}"
-	disabled = false
 }
 ```
 
@@ -31,6 +28,8 @@ The following arguments are supported:
 
 * `name` - (Required) The unique (within the identity domain) name of the security rule.
 
+* `description` - (Optional) A description for this security rule.
+
 * `source_list` - (Required) The source security list (prefixed with `seclist:`), or security IP list (prefixed with
 `seciplist:`).
 
@@ -42,5 +41,17 @@ The following arguments are supported:
 * `action` - (Required) Whether to `permit`, `refuse` or `deny` packets to which this rule applies. This will ordinarily
 be `permit`.
 
-* `disabled` - (Required) Whether to disable this security rule. This is useful if you want to temporarily disable a rule
-without removing it outright from your Terraform resource definition.
+* `disabled` - (Optional) Whether to disable this security rule. This is useful if you want to temporarily disable a rule
+without removing it outright from your Terraform resource definition. Defaults to `false`.
+
+In addition to the above, the following values are exported:
+
+* `uri` - The Uniform Resource Identifier of the sec rule.
+
+## Import
+
+Sec Rules can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_sec_rule.rule1 example
+```
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_application.html.markdown
similarity index 55%
rename from website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown
rename to website/source/docs/providers/opc/r/opc_compute_security_application.html.markdown
index 94760f082..29e3af03d 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown
+++ b/website/source/docs/providers/opc/r/opc_compute_security_application.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracleopc"
+layout: "opc"
 page_title: "Oracle: opc_compute_security_application"
-sidebar_current: "docs-oracleopc-resource-security-application"
+sidebar_current: "docs-opc-resource-security-application"
 description: |-
   Creates and manages a security application in an OPC identity domain.
 ---
@@ -10,13 +10,24 @@ description: |-
 
 The ``opc_compute_security_application`` resource creates and manages a security application in an OPC identity domain.
 
-## Example Usage
+## Example Usage (TCP)
 
 ```
 resource "opc_compute_security_application" "tomcat" {
-	name = "tomcat"
+	name     = "tomcat"
 	protocol = "tcp"
-	dport = "8080"
+	dport    = "8080"
+}
+```
+
+## Example Usage (ICMP)
+
+```
+resource "opc_compute_security_application" "tomcat" {
+	name     = "tomcat"
+	protocol = "icmp"
+	icmptype = "echo"
+	icmpcode = "protocol"
 }
 ```
 
@@ -26,14 +37,21 @@ The following arguments are supported:
 
 * `name` - (Required) The unique (within the identity domain) name of the application
 
-* `protocol` - (Required) The protocol to enable for this application.
Must be either one of
-`tcp`, `udp`, `icmp`, `igmp`, `ipip`, `rdp`, `esp`, `ah`, `gre`, `icmpv6`, `ospf`, `pim`, `sctp`, `mplsip` or `all`, or
-the corresponding integer in the range 0-254 from the list of [assigned protocol numbers](http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)
+* `protocol` - (Required) The protocol to enable for this application. Must be one of
+`tcp`, `udp`, `ah`, `esp`, `icmp`, `icmpv6`, `igmp`, `ipip`, `gre`, `mplsip`, `ospf`, `pim`, `rdp`, `sctp` or `all`.
 
-* `dport` - (Required) The port, or range of ports, to enable for this application, e.g `8080`, `6000-7000`.
+* `dport` - (Required) The port, or range of ports, to enable for this application, e.g. `8080`, `6000-7000`. This must be set if the `protocol` is set to `tcp` or `udp`.
 
 * `icmptype` - (Optional) The ICMP type to enable for this application, if the `protocol` is `icmp`. Must be one of
 `echo`, `reply`, `ttl`, `traceroute`, `unreachable`.
 
 * `icmpcode` - (Optional) The ICMP code to enable for this application, if the `protocol` is `icmp`. Must be one of
-`network`, `host`, `protocol`, `port`, `df`, `admin`.
+`admin`, `df`, `host`, `network`, `port` or `protocol`.
+
+## Import
+
+Security Applications can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_security_application.application1 example
+```
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_association.html.markdown
similarity index 55%
rename from website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown
rename to website/source/docs/providers/opc/r/opc_compute_security_association.html.markdown
index 49207c879..0cbe442ef 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown
+++ b/website/source/docs/providers/opc/r/opc_compute_security_association.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracleopc"
+layout: "opc"
 page_title: "Oracle: opc_compute_security_association"
-sidebar_current: "docs-oracleopc-resource-security-association"
+sidebar_current: "docs-opc-resource-security-association"
 description: |-
   Creates and manages a security association in an OPC identity domain.
 ---
@@ -15,8 +15,9 @@ list in an OPC identity domain.
 
 ```
 resource "opc_compute_security_association" "test_instance_sec_list_1" {
-	vcable = "${opc_compute_instance.test_instance.vcable}"
-	seclist = "${opc_compute_security_list.sec_list1.name}"
+	name    = "association1"
+	vcable  = "${opc_compute_instance.test_instance.vcable}"
+	seclist = "${opc_compute_security_list.sec_list1.name}"
 }
 ```
 
@@ -24,6 +25,16 @@
 
 The following arguments are supported:
 
+* `name` - (Optional) The name for the Security Association. If not specified, one is created automatically. Changing this forces a new resource to be created.
+
 * `vcable` - (Required) The `vcable` of the instance to associate to the security list.
 
 * `seclist` - (Required) The name of the security list to associate the instance to.
+
+## Import
+
+Security Associations can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_security_association.association1 example
+```
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_ip_list.html.markdown
similarity index 71%
rename from website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown
rename to website/source/docs/providers/opc/r/opc_compute_security_ip_list.html.markdown
index 62f40d839..8873d4d49 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown
+++ b/website/source/docs/providers/opc/r/opc_compute_security_ip_list.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracleopc"
+layout: "opc"
 page_title: "Oracle: opc_compute_security_ip_list"
-sidebar_current: "docs-oracleopc-resource-security-ip-list"
+sidebar_current: "docs-opc-resource-security-ip-list"
 description: |-
   Creates and manages a security IP list in an OPC identity domain.
 ---
@@ -26,3 +26,13 @@ The following arguments are supported:
 * `name` - (Required) The unique (within the identity domain) name of the security IP list.
 
 * `ip_entries` - (Required) The IP addresses to include in the list.
+
+* `description` - (Optional) The description of the security IP list.
+
+## Import
+
+Security IP Lists can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_security_ip_list.list1 example
+```
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_list.html.markdown
similarity index 68%
rename from website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown
rename to website/source/docs/providers/opc/r/opc_compute_security_list.html.markdown
index 64547a41e..ea92fc8c3 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown
+++ b/website/source/docs/providers/opc/r/opc_compute_security_list.html.markdown
@@ -1,12 +1,12 @@
 ---
-layout: "oracleopc"
+layout: "opc"
 page_title: "Oracle: opc_compute_security_list"
-sidebar_current: "docs-oracleopc-resource-security-list"
+sidebar_current: "docs-opc-resource-security-list"
 description: |-
   Creates and manages a security list in an OPC identity domain.
 ---
 
 # opc\_compute\_security\_list
 
 The ``opc_compute_security_list`` resource creates and manages a security list in an OPC identity domain.
 
@@ -14,9 +14,9 @@
 
 ```
 resource "opc_compute_security_list" "sec_list1" {
-	name = "sec-list-1"
-	policy = "permit"
-	outbound_cidr_policy = "deny"
+	name                 = "sec-list-1"
+	policy               = "permit"
+	outbound_cidr_policy = "deny"
 }
 ```
 
@@ -29,5 +29,13 @@ The following arguments are supported:
 
 * `policy` - (Required) The policy to apply to instances associated with this list. Must be one of `permit`,
 `reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent).
 
-* `output_cidr_policy` - (Required) The policy for outbound traffic from the security list.Must be one of `permit`,
+* `outbound_cidr_policy` - (Required) The policy for outbound traffic from the security list. Must be one of `permit`,
 `reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent).
+
+## Import
+
+Security Lists can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_security_list.list1 example
+```
diff --git a/website/source/docs/providers/opc/r/opc_compute_security_protocol.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_protocol.html.markdown
new file mode 100644
index 000000000..612ab88b9
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_security_protocol.html.markdown
@@ -0,0 +1,65 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_security_protocol"
+sidebar_current: "docs-opc-resource-security-protocol"
+description: |-
+  Creates and manages a security protocol in an OPC identity domain.
+---
+
+# opc\_compute\_security\_protocol
+
+The ``opc_compute_security_protocol`` resource creates and manages a security protocol in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_security_protocol" "default" {
+  name        = "security-protocol-1"
+  dst_ports   = ["2045-2050"]
+  src_ports   = ["3045-3060"]
+  ip_protocol = "tcp"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the security protocol.
+
+* `dst_ports` - (Optional) Enter a list of port numbers or port range strings.
+  Traffic is enabled by a security rule when a packet's destination port matches the
+  ports specified here.
+  For TCP, SCTP, and UDP, each port is a destination transport port, between 0 and 65535,
+  inclusive. For ICMP, each port is an ICMP type, between 0 and 255, inclusive.
+  If no destination ports are specified, all destination ports or ICMP types are allowed.
+
+* `src_ports` - (Optional) Enter a list of port numbers or port range strings.
+  Traffic is enabled by a security rule when a packet's source port matches the
+  ports specified here.
+  For TCP, SCTP, and UDP, each port is a source transport port,
+  between 0 and 65535, inclusive.
+  For ICMP, each port is an ICMP type, between 0 and 255, inclusive.
+  If no source ports are specified, all source ports or ICMP types are allowed.
+
+* `ip_protocol` - (Optional) The protocol used in the data portion of the IP datagram.
+  Permitted values are: `tcp`, `udp`, `icmp`, `igmp`, `ipip`, `rdp`, `esp`, `ah`, `gre`, `icmpv6`, `ospf`, `pim`, `sctp`,
+  `mplsip`, `all`.
+  Traffic is enabled by a security rule when the protocol in the packet matches the
+  protocol specified here. If no protocol is specified, all protocols are allowed.
+
+* `description` - (Optional) A description of the security protocol.
+
+* `tags` - (Optional) List of tags that may be applied to the security protocol.
+
+In addition to the above, the following values are exported:
+
+* `uri` - The Uniform Resource Identifier for the Security Protocol.
+
+## Import
+
+Security Protocols can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_security_protocol.default example
+```
diff --git a/website/source/docs/providers/opc/r/opc_compute_security_rule.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_rule.html.markdown
new file mode 100644
index 000000000..c501517ad
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_security_rule.html.markdown
@@ -0,0 +1,62 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_security_rule"
+sidebar_current: "docs-opc-resource-security-rule"
+description: |-
+  Creates and manages a security rule in an OPC identity domain.
+---
+
+# opc\_compute\_security\_rule
+
+The ``opc_compute_security_rule`` resource creates and manages a security rule in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_security_rule" "default" {
+  name               = "SecurityRule1"
+  flow_direction     = "ingress"
+  acl                = "${opc_compute_acl.default.name}"
+  security_protocols = ["${opc_compute_security_protocol.default.name}"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the security rule.
+
+* `flow_direction` - (Required) Specify the direction of flow of traffic, which is relative to the instances, for this security rule. Allowed values are `ingress` or `egress`.
+
+* `disabled` - (Optional) Whether to disable this security rule. This is useful if you want to temporarily disable a rule without removing it outright from your Terraform resource definition. Defaults to `false`.
+
+* `acl` - (Optional) Name of the ACL that contains this security rule.
+
+* `dst_ip_address_prefixes` - (Optional) List of IP address prefix set names to match the packet's destination IP address.
+
+* `src_ip_address_prefixes` - (Optional) List of IP address prefix set names to match the packet's source IP address.
+
+* `dst_vnic_set` - (Optional) Name of virtual NIC set containing the packet's destination virtual NIC.
+
+* `src_vnic_set` - (Optional) Name of virtual NIC set containing the packet's source virtual NIC.
+
+* `security_protocols` - (Optional) List of security protocol object names to match the packet's protocol and port.
+
+* `description` - (Optional) A description of the security rule.
+
+* `tags` - (Optional) List of tags that may be applied to the security rule.
+
+## Attributes Reference
+
+In addition to the above, the following attributes are exported:
+
+* `uri` - The Uniform Resource Identifier of the security rule.
+
+## Import
+
+Security Rules can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_security_rule.rule1 example
+```
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ssh_key.html.markdown
similarity index 72%
rename from website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown
rename to website/source/docs/providers/opc/r/opc_compute_ssh_key.html.markdown
index ff85467d8..fd1dcbd9d 100644
--- a/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown
+++ b/website/source/docs/providers/opc/r/opc_compute_ssh_key.html.markdown
@@ -1,7 +1,7 @@
 ---
-layout: "oracleopc"
+layout: "opc"
 page_title: "Oracle: opc_compute_ssh_key"
-sidebar_current: "docs-oracleopc-resource-ssh-key"
+sidebar_current: "docs-opc-resource-ssh-key"
 description: |-
   Creates and manages an SSH key in an OPC identity domain.
 ---
@@ -28,5 +28,13 @@ The following arguments are supported:
 
 * `key` - (Required) The SSH key itself
 
-* `enabled` - (Required) Whether or not the key is enabled. This is useful if you want to temporarily disable an SSH key,
-without removing it entirely from your Terraform resource definition.
+* `enabled` - (Optional) Whether or not the key is enabled. This is useful if you want to temporarily disable an SSH key,
+without removing it entirely from your Terraform resource definition. Defaults to `true`.
+
+## Import
+
+SSH Keys can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_ssh_key.key1 example
+```
diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown
new file mode 100644
index 000000000..103f48eea
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown
@@ -0,0 +1,78 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_storage_volume"
+sidebar_current: "docs-opc-resource-storage-volume"
+description: |-
+  Creates and manages a storage volume in an OPC identity domain.
+---
+
+# opc\_compute\_storage\_volume
+
+The ``opc_compute_storage_volume`` resource creates and manages a storage volume in an OPC identity domain.
+
+~> **Caution:** The ``opc_compute_storage_volume`` resource can completely delete your storage volume just as easily as it can create it. To avoid costly accidents, consider setting [``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy) on your storage volume resources as an extra safety measure.
+
+## Example Usage
+
+```
+resource "opc_compute_storage_volume" "test" {
+  name        = "storageVolume1"
+  description = "Description for the Storage Volume"
+  size        = 10
+  tags        = ["bar", "foo"]
+}
+```
+
+## Example Usage (Bootable Volume)
+
+```
+resource "opc_compute_image_list" "test" {
+  name        = "imageList1"
+  description = "Description for the Image List"
+}
+
+resource "opc_compute_storage_volume" "test" {
+  name        = "storageVolume1"
+  description = "Description for the Bootable Storage Volume"
+  size        = 30
+  tags        = ["first", "second"]
+  bootable {
+    image_list = "${opc_compute_image_list.test.name}"
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name for the storage volume.
+* `description` - (Optional) The description of the storage volume.
+* `size` - (Required) The size of this storage volume in GB. The allowed range is from 1 GB to 2 TB (2048 GB).
+* `storage_type` - (Optional) The type of storage to provision. Possible values are `/oracle/public/storage/latency` or `/oracle/public/storage/default`. Defaults to `/oracle/public/storage/default`.
+* `bootable` - (Optional) A `bootable` block as defined below.
+* `tags` - (Optional) A list of strings that tag the storage volume.
+
+`bootable` supports the following:
+* `image_list` - (Optional) Defines an image list.
+* `image_list_entry` - (Optional) Defines an image list entry.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `hypervisor` - The hypervisor that this volume is compatible with.
+* `machine_image` - Name of the Machine Image, available if the volume is a bootable storage volume.
+* `managed` - Whether this volume is a managed volume.
+* `platform` - The OS platform this volume is compatible with.
+* `readonly` - Whether this volume can be attached as read-only.
+* `status` - The current state of the storage volume.
+* `storage_pool` - The storage pool from which this volume is allocated.
+* `uri` - Uniform Resource Identifier of the Storage Volume.
+
+## Import
+
+Storage Volumes can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_storage_volume.volume1 example
+```
diff --git a/website/source/docs/providers/opc/r/opc_compute_vnic_set.html.markdown b/website/source/docs/providers/opc/r/opc_compute_vnic_set.html.markdown
new file mode 100644
index 000000000..a0531b948
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_vnic_set.html.markdown
@@ -0,0 +1,45 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_vnic_set"
+sidebar_current: "docs-opc-resource-vnic-set"
+description: |-
+  Creates and manages a virtual NIC set in an OPC identity domain.
+---
+
+# opc\_compute\_vnic\_set
+
+The ``opc_compute_vnic_set`` resource creates and manages a virtual NIC set in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_vnic_set" "test_set" {
+  name         = "test_vnic_set"
+  description  = "My vnic set"
+  applied_acls = ["acl1", "acl2"]
+  virtual_nics = ["nic1", "nic2", "nic3"]
+  tags         = ["xyzzy", "quux"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The unique (within this identity domain) name of the virtual NIC set.
+
+* `description` - (Optional) A description of the virtual NIC set.
+
+* `applied_acls` - (Optional) A list of the ACLs to apply to the virtual NICs in the set.
+
+* `virtual_nics` - (Optional) List of virtual NICs associated with this virtual NIC set.
+
+* `tags` - (Optional) A list of tags to apply to the virtual NIC set.
+
+## Import
+
+VNIC Sets can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_vnic_set.set1 example
+```
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown
deleted file mode 100644
index faeb3ee7c..000000000
--- a/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: "oracleopc"
-page_title: "Oracle: opc_compute_instance"
-sidebar_current: "docs-oracleopc-resource-instance"
-description: |-
-  Creates and manages an instance in an OPC identity domain.
----
-
-# opc\_compute\_instance
-
-The ``opc_compute_instance`` resource creates and manages an instance in an OPC identity domain.
-
-~> **Caution:** The ``opc_compute_instance`` resource can completely delete your
-instance just as easily as it can create it. To avoid costly accidents,
-consider setting
-[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy)
-on your instance resources as an extra safety measure.
-
-## Example Usage
-
-```
-resource "opc_compute_instance" "test_instance" {
-	name = "test"
-	label = "test"
-	shape = "oc3"
-	imageList = "/oracle/public/oel_6.4_2GB_v1"
-	sshKeys = ["${opc_compute_ssh_key.key1.name}"]
-	attributes = "{\"foo\":\"bar\"}"
-	storage = [{
-		index = 1
-		volume = "${opc_compute_storage_volume.test_volume.name}"
-	},
-	{
-		index = 2
-		volume = "${opc_compute_storage_volume.test_volume2.name}"
-	}]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the instance. This need not be unique, as each instance is assigned a separate
-computed `opcId`.
-
-* `shape` - (Required) The shape of the instance, e.g. `oc4`.
-
-* `imageList` - (Optional) The imageList of the instance, e.g. `/oracle/public/oel_6.4_2GB_v1`
-
-* `label` - (Optional) The label to apply to the instance.
-
-* `ip` - (Computed) The internal IP address assigned to the instance.
- -* `opcId` - (Computed) The interned ID assigned to the instance. - -* `sshKeys` - (Optional) The names of the SSH Keys that can be used to log into the instance. - -* `attributes` - (Optional) An arbitrary JSON-formatted collection of attributes which is made available to the instance. - -* `vcable` - (Computed) The ID of the instance's VCable, which is used to associate it with reserved IP addresses and -add it to Security Lists. - -* `storage` - (Optional) A set of zero or more storage volumes to attach to the instance. Each volume has two arguments: -`index`, which is the volume's index in the instance's list of mounted volumes, and `name`, which is the name of the -storage volume to mount. - -* `bootOrder` - (Optional) The index number of the bootable storage volume that should be used to boot the instance. e.g. `[ 1 ]`. If you specify both `bootOrder` and `imageList`, the imagelist attribute is ignored. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown deleted file mode 100644 index 4b30b59ed..000000000 --- a/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown +++ /dev/null @@ -1,49 +0,0 @@ ---- -layout: "oracleopc" -page_title: "Oracle: opc_compute_storage_volume" -sidebar_current: "docs-oracleopc-resource-storage-volume" -description: |- - Creates and manages a storage volume in an OPC identity domain. ---- - -# opc\_compute\_storage\_volume - -The ``opc_compute_storage_volume`` resource creates and manages a storage volume in an OPC identity domain. - -~> **Caution:** The ``opc_compute_storage_volume`` resource can completely delete your -storage volume just as easily as it can create it. To avoid costly accidents, -consider setting -[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy) -on your storage volume resources as an extra safety measure. - -## Example Usage - -``` -resource "opc_compute_storage_volume" "test_volume" { - size = "3g" - description = "My storage volume" - name = "test_volume_a" - tags = ["xyzzy", "quux"] -} -``` - -## Argument Reference - -The following arguments are supported: - -* `name` - (Required) The unique (within this identity domain) name of the storage volume. - -* `size` - (Required) The size of the storage instance. - -* `description` - (Optional) A description of the storage volume. - -* `tags` - (Optional) A list of tags to apply to the storage volume. - -* `bootableImage` - (Optional) The name of the bootable image the storage volume is loaded with. - -* `bootableImageVersion` - (Optional) The version of the bootable image specified in `bootableImage` to use. - -* `snapshot` - (Optional) The snapshot to initialise the storage volume with. This has two nested properties: `name`, -for the name of the snapshot to use, and `account` for the name of the snapshot account to use. - -* `snapshotId` - (Optional) The id of the snapshot to initialise the storage volume with. 
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 77d5bf2c3..a0cd152d8 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -355,8 +355,8 @@ OpsGenie - > - Oracle OPC + > + Oracle OPC > diff --git a/website/source/layouts/opc.erb b/website/source/layouts/opc.erb new file mode 100644 index 000000000..8c9bfd793 --- /dev/null +++ b/website/source/layouts/opc.erb @@ -0,0 +1,94 @@ +<% wrap_layout :inner do %> + <% content_for :sidebar do %> + +<% end %> +<%= yield %> +<% end %> diff --git a/website/source/layouts/oracleopc.erb b/website/source/layouts/oracleopc.erb deleted file mode 100644 index a9d9579f8..000000000 --- a/website/source/layouts/oracleopc.erb +++ /dev/null @@ -1,59 +0,0 @@ -<% wrap_layout :inner do %> -<% content_for :sidebar do %> - -<% end %> - -<%= yield %> -<% end %> From 50288816f2a025237d522d63f2657affb244e63c Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Mon, 3 Apr 2017 23:45:12 -0400 Subject: [PATCH 018/342] Updating mapstructure vendor for opc provider --- .../mitchellh/mapstructure/decode_hooks.go | 5 +- .../mitchellh/mapstructure/mapstructure.go | 148 +++++++++++++----- vendor/vendor.json | 5 +- 3 files changed, 120 insertions(+), 38 deletions(-) diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index aa91f76ce..115ae67c1 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -72,7 +72,10 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { } // Modify the from kind to be correct with the new data - f = reflect.ValueOf(data).Type() + f = nil + if val := reflect.ValueOf(data); val.IsValid() { + f = val.Type() + } } return data, nil diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 40be5116d..6dee0ef0a 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -1,5 +1,5 @@ // The mapstructure package exposes functionality to convert an -// abitrary map[string]interface{} into a native Go structure. +// arbitrary map[string]interface{} into a native Go structure. // // The Go structure can be arbitrarily complex, containing slices, // other structs, etc. and the decoder will properly decode nested @@ -8,6 +8,7 @@ package mapstructure import ( + "encoding/json" "errors" "fmt" "reflect" @@ -67,6 +68,10 @@ type DecoderConfig struct { // FALSE, false, False. Anything else is an error) // - empty array = empty map and vice versa // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. 
// WeaklyTypedInput bool @@ -200,7 +205,7 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error d.config.DecodeHook, dataVal.Type(), val.Type(), data) if err != nil { - return err + return fmt.Errorf("error decoding '%s': %s", name, err) } } @@ -227,6 +232,8 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error err = d.decodePtr(name, data, val) case reflect.Slice: err = d.decodeSlice(name, data, val) + case reflect.Func: + err = d.decodeFunc(name, data, val) default: // If we reached this point then we weren't able to decode it return fmt.Errorf("%s: unsupported type: %s", name, dataKind) @@ -245,6 +252,10 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error // value to "data" of that type. func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + dataValType := dataVal.Type() if !dataValType.AssignableTo(val.Type()) { return fmt.Errorf( @@ -301,6 +312,7 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { dataVal := reflect.ValueOf(data) dataKind := getKind(dataVal) + dataType := dataVal.Type() switch { case dataKind == reflect.Int: @@ -322,6 +334,14 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er } else { return fmt.Errorf("cannot parse '%s' as int: %s", name, err) } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) default: return fmt.Errorf( "'%s' expected type '%s', got unconvertible type '%s'", @@ -408,6 +428,7 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { dataVal := reflect.ValueOf(data) dataKind := getKind(dataVal) + dataType := dataVal.Type() switch { case dataKind == reflect.Int: @@ -429,6 +450,14 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) } else { return fmt.Errorf("cannot parse '%s' as float: %s", name, err) } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) default: return fmt.Errorf( "'%s' expected type '%s', got unconvertible type '%s'", @@ -456,15 +485,30 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er // Check input type dataVal := reflect.Indirect(reflect.ValueOf(data)) if dataVal.Kind() != reflect.Map { - // Accept empty array/slice instead of an empty map in weakly typed mode - if d.config.WeaklyTypedInput && - (dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) && - dataVal.Len() == 0 { - val.Set(valMap) - return nil - } else { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + // In weak mode, we accept a slice of maps as an input... 
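+		// Note: each element of the slice below is decoded into the same
+		// target map, so keys set by later elements overwrite keys set by
+		// earlier ones.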
+ if d.config.WeaklyTypedInput { + switch dataVal.Kind() { + case reflect.Array, reflect.Slice: + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil + } } + + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) } // Accumulate errors @@ -507,7 +551,12 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er // into that. Then set the value of the pointer to this type. valType := val.Type() valElemType := valType.Elem() - realVal := reflect.New(valElemType) + + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { return err } @@ -516,6 +565,19 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er return nil } +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + val.Set(dataVal) + return nil +} + func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { dataVal := reflect.Indirect(reflect.ValueOf(data)) dataValKind := dataVal.Kind() @@ -523,26 +585,44 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) valElemType := valType.Elem() sliceType := reflect.SliceOf(valElemType) - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - // Accept empty map instead of array/slice in weakly typed mode - if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } else { + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + return fmt.Errorf( "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - } - // Make a new slice to hold our result, same size as the original data. - valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Make a new slice to hold our result, same size as the original data. 
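+		// Editorial note: this MakeSlice path only runs when the target
+		// slice is nil (or ZeroFields is set); an existing slice is
+		// instead decoded into in place and extended with reflect.Append
+		// in the loop below. A minimal assumed sketch of the weak lift,
+		// not part of the vendored commit:
+		//
+		//   var out []string
+		//   _ = mapstructure.WeakDecode("a", &out)
+		//   // out == []string{"a"}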
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } // Accumulate any errors errors := make([]string, 0) for i := 0; i < dataVal.Len(); i++ { currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } currentField := valSlice.Index(i) fieldName := fmt.Sprintf("%s[%d]", name, i) @@ -607,17 +687,10 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) structs = structs[1:] structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { fieldType := structType.Field(i) - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) - continue - } - } + fieldKind := fieldType.Type.Kind() // If "squash" is specified in the tag, we squash the field down. squash := false @@ -630,7 +703,12 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) } if squash { - structs = append(structs, val.FieldByName(fieldType.Name)) + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, val.FieldByName(fieldType.Name)) + } continue } @@ -653,7 +731,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) if !rawMapVal.IsValid() { // Do a slower search by iterating over each key and // doing case-insensitive search. - for dataValKey, _ := range dataValKeys { + for dataValKey := range dataValKeys { mK, ok := dataValKey.Interface().(string) if !ok { // Not a string key @@ -701,7 +779,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey, _ := range dataValKeysUnused { + for rawKey := range dataValKeysUnused { keys = append(keys, rawKey.(string)) } sort.Strings(keys) @@ -716,7 +794,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Add the unused keys to the list of unused keys if we're tracking metadata if d.config.Metadata != nil { - for rawKey, _ := range dataValKeysUnused { + for rawKey := range dataValKeysUnused { key := rawKey.(string) if name != "" { key = fmt.Sprintf("%s.%s", name, key) diff --git a/vendor/vendor.json b/vendor/vendor.json index df4eca514..ed725ac15 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -2582,9 +2582,10 @@ "revision": "6b17d669fac5e2f71c16658d781ec3fdd3802b69" }, { - "checksumSHA1": "4Js6Jlu93Wa0o6Kjt393L9Z7diE=", + "checksumSHA1": "MlX15lJuV8DYARX5RJY8rqrSEWQ=", "path": "github.com/mitchellh/mapstructure", - "revision": "281073eb9eb092240d33ef253c404f1cca550309" + "revision": "53818660ed4955e899c0bcafa97299a388bd7c8e", + "revisionTime": "2017-03-07T20:11:23Z" }, { "checksumSHA1": "e/MV3GL8ZOpqyNSKVPtMeqTRR/w=", From 9e823d2bc7569b619330a94efd919af90b8b90f9 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Mon, 3 Apr 2017 23:48:35 -0400 Subject: [PATCH 019/342] gofmt files --- builtin/providers/oracleopc/config.go | 8 ++++---- builtin/providers/oracleopc/resource_instance.go | 2 +- builtin/providers/oracleopc/resource_ip_reservation.go | 2 +- builtin/providers/oracleopc/resource_storage_volume.go | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/builtin/providers/oracleopc/config.go 
b/builtin/providers/oracleopc/config.go index fbae3b5d5..9e4c134e7 100644 --- a/builtin/providers/oracleopc/config.go +++ b/builtin/providers/oracleopc/config.go @@ -15,13 +15,13 @@ type Config struct { } type storageAttachment struct { - index int + index int instanceName *compute.InstanceName } type OPCClient struct { *compute.AuthenticatedClient - MaxRetryTimeout int + MaxRetryTimeout int storageAttachmentsByVolumeCache map[string][]storageAttachment } @@ -38,8 +38,8 @@ func (c *Config) Client() (*OPCClient, error) { } opcClient := &OPCClient{ - AuthenticatedClient: authenticatedClient, - MaxRetryTimeout: c.MaxRetryTimeout, + AuthenticatedClient: authenticatedClient, + MaxRetryTimeout: c.MaxRetryTimeout, storageAttachmentsByVolumeCache: make(map[string][]storageAttachment), } diff --git a/builtin/providers/oracleopc/resource_instance.go b/builtin/providers/oracleopc/resource_instance.go index 70f3b99c8..f242ab412 100644 --- a/builtin/providers/oracleopc/resource_instance.go +++ b/builtin/providers/oracleopc/resource_instance.go @@ -212,7 +212,7 @@ func getStorageAttachments(d *schema.ResourceData) []compute.LaunchPlanStorageAt for _, i := range storage.List() { attrs := i.(map[string]interface{}) storageAttachments = append(storageAttachments, compute.LaunchPlanStorageAttachmentSpec{ - Index: attrs["index"].(int), + Index: attrs["index"].(int), Volume: attrs["volume"].(string), }) } diff --git a/builtin/providers/oracleopc/resource_ip_reservation.go b/builtin/providers/oracleopc/resource_ip_reservation.go index fa25679d2..84bc3781d 100644 --- a/builtin/providers/oracleopc/resource_ip_reservation.go +++ b/builtin/providers/oracleopc/resource_ip_reservation.go @@ -38,7 +38,7 @@ func resourceIPReservation() *schema.Resource { ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - + "ip": &schema.Schema{ Type: schema.TypeString, Optional: false, diff --git a/builtin/providers/oracleopc/resource_storage_volume.go b/builtin/providers/oracleopc/resource_storage_volume.go index 2d80d09f2..73d4a2dc3 100644 --- a/builtin/providers/oracleopc/resource_storage_volume.go +++ b/builtin/providers/oracleopc/resource_storage_volume.go @@ -278,7 +278,7 @@ func resourceStorageVolumeDelete(d *schema.ResourceData, meta interface{}) error sva.DeleteStorageAttachment(attachment.Name) sva.WaitForStorageAttachmentDeleted(attachment.Name, meta.(*OPCClient).MaxRetryTimeout) attachmentsToCache[index] = storageAttachment{ - index: attachment.Index, + index: attachment.Index, instanceName: compute.InstanceNameFromString(attachment.InstanceName), } } From 51162119b9c569129347722db5b54820ec659cd7 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Tue, 4 Apr 2017 09:36:34 -0400 Subject: [PATCH 020/342] remove old provider naming --- builtin/providers/oracleopc/config.go | 47 --- builtin/providers/oracleopc/provider.go | 75 ----- builtin/providers/oracleopc/provider_test.go | 61 ---- .../providers/oracleopc/resource_instance.go | 306 ------------------ .../oracleopc/resource_instance_test.go | 156 --------- .../oracleopc/resource_ip_association.go | 103 ------ .../oracleopc/resource_ip_association_test.go | 74 ----- .../oracleopc/resource_ip_reservation.go | 122 ------- .../resource_security_application.go | 124 ------- .../resource_security_association.go | 103 ------ .../resource_security_association_test.go | 75 ----- .../oracleopc/resource_security_ip_list.go | 117 ------- .../oracleopc/resource_security_list.go | 119 ------- .../oracleopc/resource_security_rule.go | 143 -------- 
.../oracleopc/resource_security_rule_test.go | 85 ----- .../providers/oracleopc/resource_ssh_key.go | 117 ------- .../oracleopc/resource_storage_volume.go | 301 ----------------- .../oracleopc/resource_storage_volume_test.go | 70 ---- 18 files changed, 2198 deletions(-) delete mode 100644 builtin/providers/oracleopc/config.go delete mode 100644 builtin/providers/oracleopc/provider.go delete mode 100644 builtin/providers/oracleopc/provider_test.go delete mode 100644 builtin/providers/oracleopc/resource_instance.go delete mode 100644 builtin/providers/oracleopc/resource_instance_test.go delete mode 100644 builtin/providers/oracleopc/resource_ip_association.go delete mode 100644 builtin/providers/oracleopc/resource_ip_association_test.go delete mode 100644 builtin/providers/oracleopc/resource_ip_reservation.go delete mode 100644 builtin/providers/oracleopc/resource_security_application.go delete mode 100644 builtin/providers/oracleopc/resource_security_association.go delete mode 100644 builtin/providers/oracleopc/resource_security_association_test.go delete mode 100644 builtin/providers/oracleopc/resource_security_ip_list.go delete mode 100644 builtin/providers/oracleopc/resource_security_list.go delete mode 100644 builtin/providers/oracleopc/resource_security_rule.go delete mode 100644 builtin/providers/oracleopc/resource_security_rule_test.go delete mode 100644 builtin/providers/oracleopc/resource_ssh_key.go delete mode 100644 builtin/providers/oracleopc/resource_storage_volume.go delete mode 100644 builtin/providers/oracleopc/resource_storage_volume_test.go diff --git a/builtin/providers/oracleopc/config.go b/builtin/providers/oracleopc/config.go deleted file mode 100644 index 9e4c134e7..000000000 --- a/builtin/providers/oracleopc/config.go +++ /dev/null @@ -1,47 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "net/url" -) - -type Config struct { - User string - Password string - IdentityDomain string - Endpoint string - MaxRetryTimeout int -} - -type storageAttachment struct { - index int - instanceName *compute.InstanceName -} - -type OPCClient struct { - *compute.AuthenticatedClient - MaxRetryTimeout int - storageAttachmentsByVolumeCache map[string][]storageAttachment -} - -func (c *Config) Client() (*OPCClient, error) { - u, err := url.ParseRequestURI(c.Endpoint) - if err != nil { - return nil, fmt.Errorf("Invalid endpoint URI: %s", err) - } - - client := compute.NewComputeClient(c.IdentityDomain, c.User, c.Password, u) - authenticatedClient, err := client.Authenticate() - if err != nil { - return nil, fmt.Errorf("Authentication failed: %s", err) - } - - opcClient := &OPCClient{ - AuthenticatedClient: authenticatedClient, - MaxRetryTimeout: c.MaxRetryTimeout, - storageAttachmentsByVolumeCache: make(map[string][]storageAttachment), - } - - return opcClient, nil -} diff --git a/builtin/providers/oracleopc/provider.go b/builtin/providers/oracleopc/provider.go deleted file mode 100644 index a6d0d3fb5..000000000 --- a/builtin/providers/oracleopc/provider.go +++ /dev/null @@ -1,75 +0,0 @@ -package opc - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "user": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OPC_USERNAME", nil), - Description: "The user name for OPC API operations.", - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OPC_PASSWORD", nil), - Description: "The user password for OPC API operations.", - }, - - "identityDomain": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OPC_IDENTITY_DOMAIN", nil), - Description: "The OPC identity domain for API operations", - }, - - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OPC_ENDPOINT", nil), - Description: "The HTTP endpoint for OPC API operations.", - }, - - "maxRetryTimeout": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OPC_MAX_RETRY_TIMEOUT", 3000), - Description: "Max num seconds to wait for successful response when operating on resources within OPC (defaults to 3000)", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "opc_compute_storage_volume": resourceStorageVolume(), - "opc_compute_instance": resourceInstance(), - "opc_compute_ssh_key": resourceSSHKey(), - "opc_compute_security_application": resourceSecurityApplication(), - "opc_compute_security_list": resourceSecurityList(), - "opc_compute_security_ip_list": resourceSecurityIPList(), - "opc_compute_ip_reservation": resourceIPReservation(), - "opc_compute_ip_association": resourceIPAssociation(), - "opc_compute_security_rule": resourceSecurityRule(), - "opc_compute_security_association": resourceSecurityAssociation(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - User: d.Get("user").(string), - Password: d.Get("password").(string), - IdentityDomain: d.Get("identityDomain").(string), - Endpoint: d.Get("endpoint").(string), - MaxRetryTimeout: d.Get("maxRetryTimeout").(int), - } - - return config.Client() -} diff --git a/builtin/providers/oracleopc/provider_test.go b/builtin/providers/oracleopc/provider_test.go deleted file mode 100644 index c60076b06..000000000 --- a/builtin/providers/oracleopc/provider_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package opc - -import ( - "os" - "testing" - - "fmt" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "opc": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - required := []string{"OPC_USERNAME", "OPC_PASSWORD", "OPC_IDENTITY_DOMAIN", "OPC_ENDPOINT"} - for _, prop := range required { - if os.Getenv(prop) == "" { - t.Fatalf("%s must be set for acceptance test", prop) - } - } -} - -type OPCResourceState struct { - *OPCClient - *terraform.InstanceState -} - -func opcResourceCheck(resourceName string, f func(checker 
*OPCResourceState) error) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("Resource not found: %s", resourceName) - } - - state := &OPCResourceState{ - OPCClient: testAccProvider.Meta().(*OPCClient), - InstanceState: rs.Primary, - } - - return f(state) - } -} diff --git a/builtin/providers/oracleopc/resource_instance.go b/builtin/providers/oracleopc/resource_instance.go deleted file mode 100644 index f242ab412..000000000 --- a/builtin/providers/oracleopc/resource_instance.go +++ /dev/null @@ -1,306 +0,0 @@ -package opc - -import ( - "encoding/json" - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceInstanceCreate, - Read: resourceInstanceRead, - Delete: resourceInstanceDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "shape": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "imageList": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "label": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "ip": { - Type: schema.TypeString, - Optional: false, - Computed: true, - }, - - "opcId": { - Type: schema.TypeString, - Optional: false, - Computed: true, - }, - - "sshKeys": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "attributes": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "vcable": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "storage": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "index": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "volume": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - - "bootOrder": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeInt}, - }, - }, - } -} - -func getAttrs(d *schema.ResourceData) (*map[string]interface{}, error) { - var attrs map[string]interface{} - - attrString := d.Get("attributes").(string) - if attrString == "" { - return &attrs, nil - } - if err := json.Unmarshal([]byte(attrString), &attrs); err != nil { - return &attrs, fmt.Errorf("Cannot parse '%s' as json", attrString) - } - return &attrs, nil -} - -func resourceInstanceCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d.State()) - - client := meta.(*OPCClient).Instances() - name := d.Get("name").(string) - shape := d.Get("shape").(string) - imageList := d.Get("imageList").(string) - label := d.Get("label").(string) - storage := getStorageAttachments(d) - sshKeys := getSSHKeys(d) - bootOrder := getBootOrder(d) - - attrs, err := getAttrs(d) - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating instance with name %s, shape %s, imageList %s, storage %s, bootOrder %s, label %s, sshKeys %s, attrs %#v", - name, shape, imageList, storage, bootOrder, label, sshKeys, attrs) - - id, err := client.LaunchInstance(name, label, shape, imageList, storage, bootOrder, sshKeys, *attrs) - if err != nil { - return fmt.Errorf("Error creating instance 
%s: %s", name, err) - } - - log.Printf("[DEBUG] Waiting for instance %s to come online", id.String()) - info, err := client.WaitForInstanceRunning(id, meta.(*OPCClient).MaxRetryTimeout) - if err != nil { - return fmt.Errorf("Error waiting for instance %s to come online: %s", id, err) - } - - log.Printf("[DEBUG] Created instance %s: %#v", id, info) - - attachStorage( - &compute.InstanceName{ - Name: info.Name, - ID: info.ID, - }, - d, meta) - - d.SetId(info.Name) - updateInstanceResourceData(d, info) - return nil -} - -func attachStorage(name *compute.InstanceName, d *schema.ResourceData, meta interface{}) error { - storageClient := meta.(*OPCClient).StorageAttachments() - storage := d.Get("storage").(*schema.Set) - updatedStorage := schema.NewSet(storage.F, []interface{}{}) - - for _, i := range storage.List() { - attrs := i.(map[string]interface{}) - attachmentInfo, err := storageClient.CreateStorageAttachment( - attrs["index"].(int), - name, - attrs["volume"].(string)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Waiting for storage attachment %#v to come online", attachmentInfo) - storageClient.WaitForStorageAttachmentCreated(attachmentInfo.Name, meta.(*OPCClient).MaxRetryTimeout) - log.Printf("[DEBUG] Storage attachment %s: %s-%s created", - attachmentInfo.Name, attachmentInfo.InstanceName, attachmentInfo.StorageVolumeName) - attrs["name"] = attachmentInfo.Name - updatedStorage.Add(attrs) - } - - d.Set("storage", updatedStorage) - return nil -} - -func getSSHKeys(d *schema.ResourceData) []string { - sshKeys := []string{} - for _, i := range d.Get("sshKeys").([]interface{}) { - sshKeys = append(sshKeys, i.(string)) - } - return sshKeys -} - -func getBootOrder(d *schema.ResourceData) []int { - bootOrder := []int{} - for _, i := range d.Get("bootOrder").([]interface{}) { - bootOrder = append(bootOrder, i.(int)) - } - return bootOrder -} - -func getStorageAttachments(d *schema.ResourceData) []compute.LaunchPlanStorageAttachmentSpec { - storageAttachments := []compute.LaunchPlanStorageAttachmentSpec{} - storage := d.Get("storage").(*schema.Set) - for _, i := range storage.List() { - attrs := i.(map[string]interface{}) - storageAttachments = append(storageAttachments, compute.LaunchPlanStorageAttachmentSpec{ - Index: attrs["index"].(int), - Volume: attrs["volume"].(string), - }) - } - return storageAttachments -} - -func updateInstanceResourceData(d *schema.ResourceData, info *compute.InstanceInfo) error { - d.Set("name", info.Name) - d.Set("opcId", info.ID) - d.Set("imageList", info.ImageList) - d.Set("bootOrder", info.BootOrder) - d.Set("sshKeys", info.SSHKeys) - d.Set("label", info.Label) - d.Set("ip", info.IPAddress) - d.Set("vcable", info.VCableID) - - return nil -} - -func resourceInstanceRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d.State()) - client := meta.(*OPCClient).Instances() - name := d.Get("name").(string) - instanceName := &compute.InstanceName{ - Name: name, - ID: d.Get("opcId").(string), - } - - log.Printf("[DEBUG] Reading state of instance %s", instanceName) - result, err := client.GetInstance(instanceName) - if err != nil { - // Instance doesn't exist - if compute.WasNotFoundError(err) { - log.Printf("[DEBUG] Instance %s not found", instanceName) - d.SetId("") - return nil - } - return fmt.Errorf("Error reading instance %s: %s", instanceName, err) - } - - log.Printf("[DEBUG] Read state of instance %s: %#v", instanceName, result) - - attachments, err := 
meta.(*OPCClient).StorageAttachments().GetStorageAttachmentsForInstance(instanceName) - if err != nil { - return fmt.Errorf("Error reading storage attachments for instance %s: %s", instanceName, err) - } - updateInstanceResourceData(d, result) - updateAttachmentResourceData(d, attachments) - return nil -} - -func updateAttachmentResourceData(d *schema.ResourceData, attachments *[]compute.StorageAttachmentInfo) { - attachmentSet := schema.NewSet(d.Get("storage").(*schema.Set).F, []interface{}{}) - for _, attachment := range *attachments { - properties := map[string]interface{}{ - "index": attachment.Index, - "volume": attachment.StorageVolumeName, - "name": attachment.Name, - } - attachmentSet.Add(properties) - } - d.Set("storage", attachmentSet) -} - -func resourceInstanceDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d.State()) - client := meta.(*OPCClient).Instances() - name := d.Get("name").(string) - - instanceName := &compute.InstanceName{ - Name: name, - ID: d.Get("opcId").(string), - } - - log.Printf("[DEBUG] Deleting instance %s", instanceName) - if err := client.DeleteInstance(instanceName); err != nil { - return fmt.Errorf("Error deleting instance %s: %s", instanceName, err) - } - if err := client.WaitForInstanceDeleted(instanceName, meta.(*OPCClient).MaxRetryTimeout); err != nil { - return fmt.Errorf("Error deleting instance %s: %s", instanceName, err) - } - - for _, attachment := range d.Get("storage").(*schema.Set).List() { - name := attachment.(map[string]interface{})["name"].(string) - log.Printf("[DEBUG] Deleting storage attachment %s", name) - client.StorageAttachments().DeleteStorageAttachment(name) - client.StorageAttachments().WaitForStorageAttachmentDeleted(name, meta.(*OPCClient).MaxRetryTimeout) - } - - return nil -} diff --git a/builtin/providers/oracleopc/resource_instance_test.go b/builtin/providers/oracleopc/resource_instance_test.go deleted file mode 100644 index 6f386af84..000000000 --- a/builtin/providers/oracleopc/resource_instance_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/resource" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "testing" -) - -func TestAccOPCInstance_Basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: opcResourceCheck( - instanceResourceName, - testAccCheckInstanceDestroyed), - Steps: []resource.TestStep{ - { - Config: testAccInstanceBasic, - Check: resource.ComposeTestCheckFunc( - opcResourceCheck( - instanceResourceName, - testAccCheckInstanceExists), - opcResourceCheck( - keyResourceName, - testAccCheckSSHKeyExists), - ), - }, - { - Config: modifySSHKey, - Check: resource.ComposeTestCheckFunc( - opcResourceCheck( - instanceResourceName, - testAccCheckInstanceExists), - opcResourceCheck( - keyResourceName, - testAccCheckSSHKeyUpdated), - ), - }, - }, - }) -} - -func testAccCheckInstanceExists(state *OPCResourceState) error { - instanceName := getInstanceName(state) - - if _, err := state.Instances().GetInstance(instanceName); err != nil { - return fmt.Errorf("Error retrieving state of instance %s: %s", instanceName, err) - } - - return nil -} - -func testAccCheckSSHKeyExists(state *OPCResourceState) error { - keyName := state.Attributes["name"] - - if _, err := state.SSHKeys().GetSSHKey(keyName); err != nil { - return fmt.Errorf("Error retrieving state of key %s: %s", keyName, err) - } - - return nil -} 
- -func testAccCheckSSHKeyUpdated(state *OPCResourceState) error { - keyName := state.Attributes["name"] - info, err := state.SSHKeys().GetSSHKey(keyName) - if err != nil { - return err - } - if info.Key != updatedKey { - return fmt.Errorf("Expected key\n\t%s\nbut was\n\t%s", updatedKey, info.Key) - } - return nil -} - -func getInstanceName(rs *OPCResourceState) *compute.InstanceName { - return &compute.InstanceName{ - Name: rs.Attributes["name"], - ID: rs.Attributes["opcId"], - } -} - -func testAccCheckInstanceDestroyed(state *OPCResourceState) error { - instanceName := getInstanceName(state) - if info, err := state.Instances().GetInstance(instanceName); err == nil { - return fmt.Errorf("Instance %s still exists: %#v", instanceName, info) - } - - return nil -} - -const instanceName = "test_instance" -const keyName = "test_key" - -var instanceResourceName = fmt.Sprintf("opc_compute_instance.%s", instanceName) -var keyResourceName = fmt.Sprintf("opc_compute_ssh_key.%s", keyName) - -const originalKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIkZEr5UcMojtxhk6Zum39NOihHNXEvRWDt5WssX8TH/ghpv3D25K1pJkf+wfAi17HwEmYwPMEyEHENS443v6RZbXvzCkUWzkJzq7Zvbdqld038km31La2QUoMMp1KL5zk1nM65xCeQDVcR/h++03EScB2CuzTpAV6khMdfgOJgxm361kfrDVRwc1HQrAOpOnzkpPfwqBrYWqN1UnKvuO77Wk8z5LBe03EPNru3bLE3s3qHI9hjO0gXMiVUi0KyNxdWfDO8esqQlKavHAeePyrRA55YF8kBB5dEl4tVNOqpY/8TRnGN1mOe0LWxa8Ytz1wbyS49knsNVTel" -const updatedKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDHvb/2OSemgzUYLNW1/T3u33r7sZy1qbWtgVWiREH4gS5TVmDVPuvN1MFLdNqiWQA53gK8Gp24jtjNm9ftcPhicv81HVWJTB69C0sJGEfF0l4mgbemJLH3i37Mb6SdWJcGof9qHVDADPgiC8jIBVUhdiJSeq4fUJ3NQA2eUExBkRglQWairkNzPNA0mi3GL9KDGnoBnSCAXNGoKgDgIOqW0dYFP6oHyGWkF7V+/TME9aIQvmMpHjVzl7brZ/wED2t5vTJxxbgogHEmWnfs7p8EP5IsN6Vnjd0VNIt1tu3TduS8kH5npkPqZz8oIP93Ypxn0l7ZNEl9MahbhPj3gJ1YY7Cygrlt1VLC1ibBbOgIS2Lj6vGG/Yjkqs3Vw6qrmTRlsJ9c6bZO2xq0xzV11XQHvjPegBOClF6AztEe1jKU/RUFnzjIF8lUmM63fTaXuVkNERkTSE3E9XL3Uq6eqYdef7wHFFhCMSGotp3ANAb30kflysA9ID0b3o5QU2tB8OBxBicXQy11lh+u204YJuvIzeTXo+JAad5TWFlJcsUlbPFppLQdhUpoWaJouBGJV36DJb9R34i9T8Ze5tnJUQgPmMkERyPvb/+v5j3s2hs1A9WO6/MqmZd70gudsX/1bqWT898vCCOdM+CspNVY7nHVUtde7C6BrHzphr/C1YBXHw==" - -var testAccInstanceBasic = fmt.Sprintf(` -resource "opc_compute_instance" "%s" { - name = "test" - label = "test" - shape = "oc3" - imageList = "/oracle/public/oel_6.4_2GB_v1" - sshKeys = ["${opc_compute_ssh_key.test_key.name}"] - attributes = "{\"foo\": \"bar\"}" - storage = { - index = 1 - volume = "${opc_compute_storage_volume.test_volume.name}" - } -} - -resource "opc_compute_storage_volume" "test_volume" { - size = "3g" - description = "My volume" - name = "test_volume_b" - tags = ["foo", "bar", "baz"] -} - -resource "opc_compute_ssh_key" "%s" { - name = "test-key" - key = "%s" - enabled = true -} -`, instanceName, keyName, originalKey) - -var modifySSHKey = fmt.Sprintf(` -resource "opc_compute_instance" "%s" { - name = "test" - label = "test" - shape = "oc3" - imageList = "/oracle/public/oel_6.4_2GB_v1" - sshKeys = ["${opc_compute_ssh_key.test_key.name}"] - attributes = "{\"foo\": \"bar\"}" - storage = { - index = 1 - volume = "${opc_compute_storage_volume.test_volume.name}" - } -} - -resource "opc_compute_storage_volume" "test_volume" { - size = "3g" - description = "My volume" - name = "test_volume_b" - tags = ["foo", "bar", "baz"] -} - -resource "opc_compute_ssh_key" "%s" { - name = "test-key" - key = "%s" - enabled = true -} -`, instanceName, keyName, updatedKey) diff --git a/builtin/providers/oracleopc/resource_ip_association.go 
b/builtin/providers/oracleopc/resource_ip_association.go deleted file mode 100644 index 84df10ba8..000000000 --- a/builtin/providers/oracleopc/resource_ip_association.go +++ /dev/null @@ -1,103 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceIPAssociation() *schema.Resource { - return &schema.Resource{ - Create: resourceIPAssociationCreate, - Read: resourceIPAssociationRead, - Delete: resourceIPAssociationDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "vcable": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "parentpool": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceIPAssociationCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - vcable, parentpool := getIPAssociationResourceData(d) - - log.Printf("[DEBUG] Creating ip association between vcable %s and parent pool %s", - vcable, parentpool) - - client := meta.(*OPCClient).IPAssociations() - info, err := client.CreateIPAssociation(vcable, parentpool) - if err != nil { - return fmt.Errorf("Error creating ip association between vcable %s and parent pool %s: %s", - vcable, parentpool, err) - } - - d.SetId(info.Name) - updateIPAssociationResourceData(d, info) - return nil -} - -func updateIPAssociationResourceData(d *schema.ResourceData, info *compute.IPAssociationInfo) { - d.Set("name", info.Name) - d.Set("parentpool", info.ParentPool) - d.Set("vcable", info.VCable) -} - -func resourceIPAssociationRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).IPAssociations() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of ip association %s", name) - result, err := client.GetIPAssociation(name) - if err != nil { - // IP Association does not exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading ip association %s: %s", name, err) - } - - log.Printf("[DEBUG] Read state of ip association %s: %#v", name, result) - updateIPAssociationResourceData(d, result) - return nil -} - -func getIPAssociationResourceData(d *schema.ResourceData) (string, string) { - return d.Get("vcable").(string), d.Get("parentpool").(string) -} - -func resourceIPAssociationDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).IPAssociations() - name := d.Get("name").(string) - - vcable, parentpool := getIPAssociationResourceData(d) - log.Printf("[DEBUG] Deleting ip association %s between vcable %s and parent pool %s", - name, vcable, parentpool) - - if err := client.DeleteIPAssociation(name); err != nil { - return fmt.Errorf("Error deleting ip association %s between vcable %s and parent pool %s: %s", - name, vcable, parentpool, err) - } - return nil -} diff --git a/builtin/providers/oracleopc/resource_ip_association_test.go b/builtin/providers/oracleopc/resource_ip_association_test.go deleted file mode 100644 index 44f48474f..000000000 --- a/builtin/providers/oracleopc/resource_ip_association_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/resource" - "testing" -) - -func 
TestAccOPCResourceIPAssociation_Basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: opcResourceCheck( - ipAssociationResourceName, - testAccCheckIPAssociationDestroyed), - Steps: []resource.TestStep{ - { - Config: testAccIPAssociationBasic, - Check: resource.ComposeTestCheckFunc( - opcResourceCheck( - ipAssociationResourceName, - testAccCheckIPAssociationExists), - ), - }, - }, - }) -} - -func testAccCheckIPAssociationExists(state *OPCResourceState) error { - associationName := getIPAssociationName(state) - - if _, err := state.IPAssociations().GetIPAssociation(associationName); err != nil { - return fmt.Errorf("Error retrieving state of ip assocation %s: %s", associationName, err) - } - - return nil -} - -func getIPAssociationName(rs *OPCResourceState) string { - return rs.Attributes["name"] -} - -func testAccCheckIPAssociationDestroyed(state *OPCResourceState) error { - associationName := getAssociationName(state) - if info, err := state.IPAssociations().GetIPAssociation(associationName); err == nil { - return fmt.Errorf("IP association %s still exists: %#v", associationName, info) - } - - return nil -} - -const ipAssociationName = "test_ip_association" - -var ipAssociationResourceName = fmt.Sprintf("opc_compute_ip_association.%s", ipAssociationName) - -var testAccIPAssociationBasic = fmt.Sprintf(` -resource "opc_compute_ip_reservation" "reservation1" { - parentpool = "/oracle/public/ippool" - permanent = true -} - -resource "opc_compute_ip_association" "%s" { - vcable = "${opc_compute_instance.test-instance1.vcable}" - parentpool = "ipreservation:${opc_compute_ip_reservation.reservation1.name}" -} - -resource "opc_compute_instance" "test-instance1" { - name = "test" - label = "test" - shape = "oc3" - imageList = "/oracle/public/oel_6.4_2GB_v1" -} -`, ipAssociationName) diff --git a/builtin/providers/oracleopc/resource_ip_reservation.go b/builtin/providers/oracleopc/resource_ip_reservation.go deleted file mode 100644 index 84bc3781d..000000000 --- a/builtin/providers/oracleopc/resource_ip_reservation.go +++ /dev/null @@ -1,122 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceIPReservation() *schema.Resource { - return &schema.Resource{ - Create: resourceIPReservationCreate, - Read: resourceIPReservationRead, - Delete: resourceIPReservationDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "permanent": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - ForceNew: true, - }, - - "parentpool": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "tags": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "ip": &schema.Schema{ - Type: schema.TypeString, - Optional: false, - Computed: true, - }, - }, - } -} - -func resourceIPReservationCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - parentpool, permanent, tags := getIPReservationResourceData(d) - - log.Printf("[DEBUG] Creating ip reservation from parentpool %s with tags=%s", - parentpool, tags) - - client := meta.(*OPCClient).IPReservations() - info, err := client.CreateIPReservation(parentpool, permanent, tags) - if err != nil { - 
return fmt.Errorf("Error creating ip reservation from parentpool %s with tags=%s: %s", - parentpool, tags, err) - } - - d.SetId(info.Name) - updateIPReservationResourceData(d, info) - return nil -} - -func updateIPReservationResourceData(d *schema.ResourceData, info *compute.IPReservationInfo) { - d.Set("name", info.Name) - d.Set("parentpool", info.ParentPool) - d.Set("permanent", info.Permanent) - d.Set("tags", info.Tags) - d.Set("ip", info.IP) -} - -func resourceIPReservationRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).IPReservations() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of ip reservation %s", name) - result, err := client.GetIPReservation(name) - if err != nil { - // IP Reservation does not exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading ip reservation %s: %s", name, err) - } - - log.Printf("[DEBUG] Read state of ip reservation %s: %#v", name, result) - updateIPReservationResourceData(d, result) - return nil -} - -func getIPReservationResourceData(d *schema.ResourceData) (string, bool, []string) { - tagdata := d.Get("tags").([]interface{}) - tags := make([]string, len(tagdata)) - for i, tag := range tagdata { - tags[i] = tag.(string) - } - return d.Get("parentpool").(string), - d.Get("permanent").(bool), - tags -} - -func resourceIPReservationDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).IPReservations() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Deleting ip reservation %s", name) - - if err := client.DeleteIPReservation(name); err != nil { - return fmt.Errorf("Error deleting ip reservation %s", name) - } - return nil -} diff --git a/builtin/providers/oracleopc/resource_security_application.go b/builtin/providers/oracleopc/resource_security_application.go deleted file mode 100644 index b7205754c..000000000 --- a/builtin/providers/oracleopc/resource_security_application.go +++ /dev/null @@ -1,124 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceSecurityApplication() *schema.Resource { - return &schema.Resource{ - Create: resourceSecurityApplicationCreate, - Read: resourceSecurityApplicationRead, - Delete: resourceSecurityApplicationDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "dport": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "icmptype": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "icmpcode": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceSecurityApplicationCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - name, protocol, dport, icmptype, icmpcode, description := getSecurityApplicationResourceData(d) - - log.Printf("[DEBUG] Creating security application %s", name) - - client := meta.(*OPCClient).SecurityApplications() - info, err := 
client.CreateSecurityApplication(name, protocol, dport, icmptype, icmpcode, description) - if err != nil { - return fmt.Errorf("Error creating security application %s: %s", name, err) - } - - d.SetId(info.Name) - updateSecurityApplicationResourceData(d, info) - return nil -} - -func updateSecurityApplicationResourceData(d *schema.ResourceData, info *compute.SecurityApplicationInfo) { - d.Set("name", info.Name) - d.Set("protocol", info.Protocol) - d.Set("dport", info.DPort) - d.Set("icmptype", info.ICMPType) - d.Set("icmpcode", info.ICMPCode) - d.Set("description", info.Description) -} - -func resourceSecurityApplicationRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityApplications() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of security application %s", name) - result, err := client.GetSecurityApplication(name) - if err != nil { - // Security Application does not exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading security application %s: %s", name, err) - } - - log.Printf("[DEBUG] Read state of security application %s: %#v", name, result) - updateSecurityApplicationResourceData(d, result) - return nil -} - -func getSecurityApplicationResourceData(d *schema.ResourceData) (string, string, string, string, string, string) { - return d.Get("name").(string), - d.Get("protocol").(string), - d.Get("dport").(string), - d.Get("icmptype").(string), - d.Get("icmpcode").(string), - d.Get("description").(string) -} - -func resourceSecurityApplicationDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityApplications() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Deleting security application %s", name) - - if err := client.DeleteSecurityApplication(name); err != nil { - return fmt.Errorf("Error deleting security application %s: %s", name, err) - } - return nil -} diff --git a/builtin/providers/oracleopc/resource_security_association.go b/builtin/providers/oracleopc/resource_security_association.go deleted file mode 100644 index 15a912657..000000000 --- a/builtin/providers/oracleopc/resource_security_association.go +++ /dev/null @@ -1,103 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceSecurityAssociation() *schema.Resource { - return &schema.Resource{ - Create: resourceSecurityAssociationCreate, - Read: resourceSecurityAssociationRead, - Delete: resourceSecurityAssociationDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "vcable": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "seclist": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceSecurityAssociationCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - vcable, seclist := getSecurityAssociationResourceData(d) - - log.Printf("[DEBUG] Creating security association between vcable %s and security list %s", - vcable, seclist) - - client := meta.(*OPCClient).SecurityAssociations() - info, err := client.CreateSecurityAssociation(vcable, seclist) - if err != nil { - return 
fmt.Errorf("Error creating security association between vcable %s and security list %s: %s", - vcable, seclist, err) - } - - d.SetId(info.Name) - updateSecurityAssociationResourceData(d, info) - return nil -} - -func updateSecurityAssociationResourceData(d *schema.ResourceData, info *compute.SecurityAssociationInfo) { - d.Set("name", info.Name) - d.Set("seclist", info.SecList) - d.Set("vcable", info.VCable) -} - -func resourceSecurityAssociationRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityAssociations() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of security association %s", name) - result, err := client.GetSecurityAssociation(name) - if err != nil { - // Security Association does not exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading security association %s: %s", name, err) - } - - log.Printf("[DEBUG] Read state of security association %s: %#v", name, result) - updateSecurityAssociationResourceData(d, result) - return nil -} - -func getSecurityAssociationResourceData(d *schema.ResourceData) (string, string) { - return d.Get("vcable").(string), d.Get("seclist").(string) -} - -func resourceSecurityAssociationDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityAssociations() - name := d.Get("name").(string) - - vcable, seclist := getSecurityAssociationResourceData(d) - log.Printf("[DEBUG] Deleting security association %s between vcable %s and security list %s", - name, vcable, seclist) - - if err := client.DeleteSecurityAssociation(name); err != nil { - return fmt.Errorf("Error deleting security association %s between vcable %s and security list %s: %s", - name, vcable, seclist, err) - } - return nil -} diff --git a/builtin/providers/oracleopc/resource_security_association_test.go b/builtin/providers/oracleopc/resource_security_association_test.go deleted file mode 100644 index 604ef64cb..000000000 --- a/builtin/providers/oracleopc/resource_security_association_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/resource" - "testing" -) - -func TestAccOPCResourceSecurityAssociation_Basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: opcResourceCheck( - associationResourceName, - testAccCheckAssociationDestroyed), - Steps: []resource.TestStep{ - { - Config: testAccSecurityAssociationBasic, - Check: resource.ComposeTestCheckFunc( - opcResourceCheck( - associationResourceName, - testAccCheckAssociationExists), - ), - }, - }, - }) -} - -func testAccCheckAssociationExists(state *OPCResourceState) error { - associationName := getAssociationName(state) - - if _, err := state.SecurityAssociations().GetSecurityAssociation(associationName); err != nil { - return fmt.Errorf("Error retrieving state of security assocation %s: %s", associationName, err) - } - - return nil -} - -func getAssociationName(rs *OPCResourceState) string { - return rs.Attributes["name"] -} - -func testAccCheckAssociationDestroyed(state *OPCResourceState) error { - associationName := getAssociationName(state) - if info, err := state.SecurityAssociations().GetSecurityAssociation(associationName); err == nil { - return fmt.Errorf("Association %s still exists: %#v", associationName, info) - } - 
- return nil -} - -const associationName = "test_rule" - -var associationResourceName = fmt.Sprintf("opc_compute_security_association.%s", associationName) - -var testAccSecurityAssociationBasic = fmt.Sprintf(` -resource "opc_compute_security_list" "sec-list1" { - name = "sec-list-1" - policy = "PERMIT" - outbound_cidr_policy = "DENY" -} - -resource "opc_compute_security_association" "%s" { - vcable = "${opc_compute_instance.test-instance1.vcable}" - seclist = "${opc_compute_security_list.sec-list1.name}" -} - -resource "opc_compute_instance" "test-instance1" { - name = "test" - label = "test" - shape = "oc3" - imageList = "/oracle/public/oel_6.4_2GB_v1" -} -`, ruleName) diff --git a/builtin/providers/oracleopc/resource_security_ip_list.go b/builtin/providers/oracleopc/resource_security_ip_list.go deleted file mode 100644 index 6a3e66b28..000000000 --- a/builtin/providers/oracleopc/resource_security_ip_list.go +++ /dev/null @@ -1,117 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceSecurityIPList() *schema.Resource { - return &schema.Resource{ - Create: resourceSecurityIPListCreate, - Read: resourceSecurityIPListRead, - Update: resourceSecurityIPListUpdate, - Delete: resourceSecurityIPListDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "ip_entries": &schema.Schema{ - Type: schema.TypeList, - Required: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func resourceSecurityIPListCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - name, ipEntries := getSecurityIPListResourceData(d) - - log.Printf("[DEBUG] Creating security IP list with name %s, entries %s", - name, ipEntries) - - client := meta.(*OPCClient).SecurityIPLists() - info, err := client.CreateSecurityIPList(name, ipEntries) - if err != nil { - return fmt.Errorf("Error creating security IP list %s: %s", name, err) - } - - d.SetId(info.Name) - updateSecurityIPListResourceData(d, info) - return nil -} - -func updateSecurityIPListResourceData(d *schema.ResourceData, info *compute.SecurityIPListInfo) { - d.Set("name", info.Name) - d.Set("entries", info.SecIPEntries) -} - -func resourceSecurityIPListRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityIPLists() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of security IP list %s", name) - result, err := client.GetSecurityIPList(name) - if err != nil { - // Security IP List does not exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading security IP list %s: %s", name, err) - } - - log.Printf("[DEBUG] Read state of security IP list %s: %#v", name, result) - updateSecurityIPListResourceData(d, result) - return nil -} - -func getSecurityIPListResourceData(d *schema.ResourceData) (string, []string) { - name := d.Get("name").(string) - ipEntries := d.Get("ip_entries").([]interface{}) - ipEntryStrings := []string{} - for _, entry := range ipEntries { - ipEntryStrings = append(ipEntryStrings, entry.(string)) - } - return name, ipEntryStrings -} - -func resourceSecurityIPListUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - client := 
meta.(*OPCClient).SecurityIPLists() - name, entries := getSecurityIPListResourceData(d) - - log.Printf("[DEBUG] Updating security IP list %s with ip entries %s", - name, entries) - - info, err := client.UpdateSecurityIPList(name, entries) - if err != nil { - return fmt.Errorf("Error updating security IP list %s: %s", name, err) - } - - updateSecurityIPListResourceData(d, info) - return nil -} - -func resourceSecurityIPListDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityIPLists() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Deleting security IP list %s", name) - if err := client.DeleteSecurityIPList(name); err != nil { - return fmt.Errorf("Error deleting security IP list %s: %s", name, err) - } - return nil -} diff --git a/builtin/providers/oracleopc/resource_security_list.go b/builtin/providers/oracleopc/resource_security_list.go deleted file mode 100644 index eea11bbb1..000000000 --- a/builtin/providers/oracleopc/resource_security_list.go +++ /dev/null @@ -1,119 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceSecurityList() *schema.Resource { - return &schema.Resource{ - Create: resourceSecurityListCreate, - Read: resourceSecurityListRead, - Update: resourceSecurityListUpdate, - Delete: resourceSecurityListDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "policy": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - - "outbound_cidr_policy": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - }, - } -} - -func resourceSecurityListCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - name, policy, outboundCIDRPolicy := getSecurityListResourceData(d) - - log.Printf("[DEBUG] Creating security list with name %s, policy %s, outbound CIDR policy %s", - name, policy, outboundCIDRPolicy) - - client := meta.(*OPCClient).SecurityLists() - info, err := client.CreateSecurityList(name, policy, outboundCIDRPolicy) - if err != nil { - return fmt.Errorf("Error creating security list %s: %s", name, err) - } - - d.SetId(info.Name) - updateSecurityListResourceData(d, info) - return nil -} - -func updateSecurityListResourceData(d *schema.ResourceData, info *compute.SecurityListInfo) { - d.Set("name", info.Name) - d.Set("policy", info.Policy) - d.Set("outbound_cidr_policy", info.OutboundCIDRPolicy) -} - -func resourceSecurityListRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityLists() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of security list %s", name) - result, err := client.GetSecurityList(name) - if err != nil { - // Security List does not exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading security list %s: %s", name, err) - } - - log.Printf("[DEBUG] Read state of ssh key %s: %#v", name, result) - updateSecurityListResourceData(d, result) - return nil -} - -func getSecurityListResourceData(d *schema.ResourceData) (string, string, string) { - return d.Get("name").(string), - d.Get("policy").(string), - d.Get("outbound_cidr_policy").(string) -} - -func 
resourceSecurityListUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - client := meta.(*OPCClient).SecurityLists() - name, policy, outboundCIDRPolicy := getSecurityListResourceData(d) - - log.Printf("[DEBUG] Updating security list %s with policy %s, outbound_cidr_policy %s", - name, policy, outboundCIDRPolicy) - - info, err := client.UpdateSecurityList(name, policy, outboundCIDRPolicy) - if err != nil { - return fmt.Errorf("Error updating security list %s: %s", name, err) - } - - updateSecurityListResourceData(d, info) - return nil -} - -func resourceSecurityListDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityLists() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Deleting ssh key volume %s", name) - if err := client.DeleteSecurityList(name); err != nil { - return fmt.Errorf("Error deleting security list %s: %s", name, err) - } - return nil -} diff --git a/builtin/providers/oracleopc/resource_security_rule.go b/builtin/providers/oracleopc/resource_security_rule.go deleted file mode 100644 index 0d9eb562c..000000000 --- a/builtin/providers/oracleopc/resource_security_rule.go +++ /dev/null @@ -1,143 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceSecurityRule() *schema.Resource { - return &schema.Resource{ - Create: resourceSecurityRuleCreate, - Read: resourceSecurityRuleRead, - Update: resourceSecurityRuleUpdate, - Delete: resourceSecurityRuleDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "source_list": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "destination_list": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "application": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "action": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - - "disabled": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - ForceNew: false, - }, - }, - } -} - -func resourceSecurityRuleCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - name, sourceList, destinationList, application, action, disabled := getSecurityRuleResourceData(d) - - log.Printf("[DEBUG] Creating security list with name %s, sourceList %s, destinationList %s, application %s, action %s, disabled %s", - name, sourceList, destinationList, application, action, disabled) - - client := meta.(*OPCClient).SecurityRules() - info, err := client.CreateSecurityRule(name, sourceList, destinationList, application, action, disabled) - if err != nil { - return fmt.Errorf("Error creating security rule %s: %s", name, err) - } - - d.SetId(info.Name) - updateSecurityRuleResourceData(d, info) - return nil -} - -func updateSecurityRuleResourceData(d *schema.ResourceData, info *compute.SecurityRuleInfo) { - d.Set("name", info.Name) - d.Set("source_list", info.SourceList) - d.Set("destination_list", info.DestinationList) - d.Set("application", info.Application) - d.Set("action", info.Action) - d.Set("disabled", info.Disabled) -} - -func resourceSecurityRuleRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource 
state: %#v", d.State()) - client := meta.(*OPCClient).SecurityRules() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of security rule %s", name) - result, err := client.GetSecurityRule(name) - if err != nil { - // Security Rule does not exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading security list %s: %s", name, err) - } - - log.Printf("[DEBUG] Read state of ssh key %s: %#v", name, result) - updateSecurityRuleResourceData(d, result) - return nil -} - -func getSecurityRuleResourceData(d *schema.ResourceData) (string, string, string, string, string, bool) { - return d.Get("name").(string), - d.Get("source_list").(string), - d.Get("destination_list").(string), - d.Get("application").(string), - d.Get("action").(string), - d.Get("disabled").(bool) -} - -func resourceSecurityRuleUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - - client := meta.(*OPCClient).SecurityRules() - name, sourceList, destinationList, application, action, disabled := getSecurityRuleResourceData(d) - - log.Printf("[DEBUG] Updating security list %s with sourceList %s, destinationList %s, application %s, action %s, disabled %s", - name, sourceList, destinationList, application, action, disabled) - - info, err := client.UpdateSecurityRule(name, sourceList, destinationList, application, action, disabled) - if err != nil { - return fmt.Errorf("Error updating security rule %s: %s", name, err) - } - - updateSecurityRuleResourceData(d, info) - return nil -} - -func resourceSecurityRuleDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource state: %#v", d.State()) - client := meta.(*OPCClient).SecurityRules() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Deleting ssh key volume %s", name) - if err := client.DeleteSecurityRule(name); err != nil { - return fmt.Errorf("Error deleting security rule %s: %s", name, err) - } - return nil -} diff --git a/builtin/providers/oracleopc/resource_security_rule_test.go b/builtin/providers/oracleopc/resource_security_rule_test.go deleted file mode 100644 index f09c2b879..000000000 --- a/builtin/providers/oracleopc/resource_security_rule_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/resource" - "testing" -) - -func TestAccOPCResourceSecurityRule_Basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: opcResourceCheck( - ruleResourceName, - testAccCheckRuleDestroyed), - Steps: []resource.TestStep{ - { - Config: testAccSecurityRuleBasic, - Check: resource.ComposeTestCheckFunc( - opcResourceCheck( - ruleResourceName, - testAccCheckRuleExists), - ), - }, - }, - }) -} - -func testAccCheckRuleExists(state *OPCResourceState) error { - ruleName := getRuleName(state) - - if _, err := state.SecurityRules().GetSecurityRule(ruleName); err != nil { - return fmt.Errorf("Error retrieving state of security rule %s: %s", ruleName, err) - } - - return nil -} - -func getRuleName(rs *OPCResourceState) string { - return rs.Attributes["name"] -} - -func testAccCheckRuleDestroyed(state *OPCResourceState) error { - ruleName := getRuleName(state) - if info, err := state.SecurityRules().GetSecurityRule(ruleName); err == nil { - return fmt.Errorf("Rule %s still exists: %#v", ruleName, info) - } - - return nil -} - -const ruleName = "test_rule" -const secListName = 
"sec-list1" -const secIpListName = "sec-ip-list1" - -var ruleResourceName = fmt.Sprintf("opc_compute_security_rule.%s", ruleName) - -var testAccSecurityRuleBasic = fmt.Sprintf(` -resource "opc_compute_security_rule" "%s" { - name = "test" - source_list = "seclist:${opc_compute_security_list.sec-list1.name}" - destination_list = "seciplist:${opc_compute_security_ip_list.sec-ip-list1.name}" - action = "PERMIT" - application = "${opc_compute_security_application.spring-boot.name}" - disabled = false -} - -resource "opc_compute_security_list" "%s" { - name = "sec-list-1" - policy = "PERMIT" - outbound_cidr_policy = "DENY" -} - -resource "opc_compute_security_application" "spring-boot" { - name = "spring-boot" - protocol = "tcp" - dport = "8080" -} - -resource "opc_compute_security_ip_list" "%s" { - name = "sec-ip-list1" - ip_entries = ["217.138.34.4"] -} -`, ruleName, secListName, secIpListName) diff --git a/builtin/providers/oracleopc/resource_ssh_key.go b/builtin/providers/oracleopc/resource_ssh_key.go deleted file mode 100644 index 29f68b4aa..000000000 --- a/builtin/providers/oracleopc/resource_ssh_key.go +++ /dev/null @@ -1,117 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceSSHKey() *schema.Resource { - return &schema.Resource{ - Create: resourceSSHKeyCreate, - Read: resourceSSHKeyRead, - Update: resourceSSHKeyUpdate, - Delete: resourceSSHKeyDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - - "enabled": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - ForceNew: false, - }, - }, - } -} - -func resourceSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d) - - client := meta.(*OPCClient).SSHKeys() - name := d.Get("name").(string) - key := d.Get("key").(string) - enabled := d.Get("enabled").(bool) - - log.Printf("[DEBUG] Creating ssh key with name %s, key %s, enabled %s", - name, key, enabled) - - info, err := client.CreateSSHKey(name, key, enabled) - if err != nil { - return fmt.Errorf("Error creating ssh key %s: %s", name, err) - } - - d.SetId(info.Name) - updateSSHKeyResourceData(d, info) - return nil -} - -func updateSSHKeyResourceData(d *schema.ResourceData, info *compute.SSHKeyInfo) { - d.Set("name", info.Name) - d.Set("key", info.Key) - d.Set("enabled", info.Enabled) -} - -func resourceSSHKeyRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d) - client := meta.(*OPCClient).SSHKeys() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of ssh key %s", name) - result, err := client.GetSSHKey(name) - if err != nil { - // SSH Key does not exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading ssh key %s: %s", name, err) - } - - log.Printf("[DEBUG] Read state of ssh key %s: %#v", name, result) - updateSSHKeyResourceData(d, result) - return nil -} - -func resourceSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d) - - client := meta.(*OPCClient).SSHKeys() - name := d.Get("name").(string) - key := d.Get("key").(string) - enabled := d.Get("enabled").(bool) - - log.Printf("[DEBUG] Updating ssh key with name %s, key %s, enabled %s", - name, 
key, enabled) - - info, err := client.UpdateSSHKey(name, key, enabled) - if err != nil { - return fmt.Errorf("Error updating ssh key %s: %s", name, err) - } - - updateSSHKeyResourceData(d, info) - return nil -} - -func resourceSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d) - client := meta.(*OPCClient).SSHKeys() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Deleting ssh key volume %s", name) - if err := client.DeleteSSHKey(name); err != nil { - return fmt.Errorf("Error deleting ssh key %s: %s", name, err) - } - return nil -} diff --git a/builtin/providers/oracleopc/resource_storage_volume.go b/builtin/providers/oracleopc/resource_storage_volume.go deleted file mode 100644 index 73d4a2dc3..000000000 --- a/builtin/providers/oracleopc/resource_storage_volume.go +++ /dev/null @@ -1,301 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/oracle/terraform-provider-compute/sdk/compute" - "log" -) - -func resourceStorageVolume() *schema.Resource { - return &schema.Resource{ - Create: resourceStorageVolumeCreate, - Read: resourceStorageVolumeRead, - Update: resourceStorageVolumeUpdate, - Delete: resourceStorageVolumeDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "size": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "sizeInBytes": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - - "storage": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "/oracle/public/storage/default", - }, - - "tags": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "bootableImage": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "bootableImageVersion": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: -1, - }, - - "snapshot": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "account": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, - }, - - "snapshotId": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceStorageVolumeCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Resource data: %#v", d) - - sv := meta.(*OPCClient).StorageVolumes() - name := d.Get("name").(string) - properties := []string{d.Get("storage").(string)} - - spec := sv.NewStorageVolumeSpec( - d.Get("size").(string), - properties, - name) - - if d.Get("description").(string) != "" { - spec.SetDescription(d.Get("description").(string)) - } - - spec.SetTags(getTags(d)) - - if d.Get("bootableImage") != "" { - spec.SetBootableImage(d.Get("bootableImage").(string), d.Get("bootableImageVersion").(int)) - } - - if len(d.Get("snapshot").(*schema.Set).List()) > 0 { - snapshotDetails := d.Get("snapshot").(*schema.Set).List()[0].(map[string]interface{}) - spec.SetSnapshot( - snapshotDetails["name"].(string), - snapshotDetails["account"].(string), - ) - } - - if 
d.Get("snapshotId") != "" { - spec.SetSnapshotID(d.Get("snapshotId").(string)) - } - - log.Printf("[DEBUG] Creating storage volume %s with spec %#v", name, spec) - err := sv.CreateStorageVolume(spec) - if err != nil { - return fmt.Errorf("Error creating storage volume %s: %s", name, err) - } - - log.Printf("[DEBUG] Waiting for storage volume %s to come online", name) - info, err := sv.WaitForStorageVolumeOnline(name, meta.(*OPCClient).MaxRetryTimeout) - if err != nil { - return fmt.Errorf("Error waiting for storage volume %s to come online: %s", name, err) - } - - log.Printf("[DEBUG] Created storage volume %s: %#v", name, info) - - cachedAttachments, attachmentsFound := meta.(*OPCClient).storageAttachmentsByVolumeCache[name] - if attachmentsFound { - log.Printf("[DEBUG] Rebuilding storage attachments for volume %s", name) - for _, cachedAttachment := range cachedAttachments { - log.Printf("[DEBUG] Rebuilding storage attachments between volume %s and instance %s", - name, - cachedAttachment.instanceName) - - attachmentInfo, err := meta.(*OPCClient).StorageAttachments().CreateStorageAttachment( - cachedAttachment.index, - cachedAttachment.instanceName, - name, - ) - - if err != nil { - return fmt.Errorf( - "Error recreating storage attachment between volume %s and instance %s: %s", - name, - *cachedAttachment.instanceName, - err) - } - err = meta.(*OPCClient).StorageAttachments().WaitForStorageAttachmentCreated( - attachmentInfo.Name, - meta.(*OPCClient).MaxRetryTimeout) - if err != nil { - return fmt.Errorf( - "Error recreating storage attachment between volume %s and instance %s: %s", - name, - *cachedAttachment.instanceName, - err) - } - } - meta.(*OPCClient).storageAttachmentsByVolumeCache[name] = nil - } - - d.SetId(name) - updateResourceData(d, info) - return nil -} - -func getTags(d *schema.ResourceData) []string { - tags := []string{} - for _, i := range d.Get("tags").([]interface{}) { - tags = append(tags, i.(string)) - } - return tags -} - -func updateResourceData(d *schema.ResourceData, info *compute.StorageVolumeInfo) error { - d.Set("name", info.Name) - d.Set("description", info.Description) - d.Set("storage", info.Properties[0]) - d.Set("sizeInBytes", info.Size) - d.Set("tags", info.Tags) - d.Set("bootableImage", info.ImageList) - d.Set("bootableImageVersion", info.ImageListEntry) - if info.Snapshot != "" { - d.Set("snapshot", map[string]interface{}{ - "name": info.Snapshot, - "account": info.SnapshotAccount, - }) - } - d.Set("snapshotId", info.SnapshotID) - - return nil -} - -func resourceStorageVolumeRead(d *schema.ResourceData, meta interface{}) error { - sv := meta.(*OPCClient).StorageVolumes() - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading state of storage volume %s", name) - result, err := sv.GetStorageVolume(name) - if err != nil { - // Volume doesn't exist - if compute.WasNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading storage volume %s: %s", name, err) - } - - if len(result.Result) == 0 { - // Volume doesn't exist - d.SetId("") - return nil - } - - log.Printf("[DEBUG] Read state of storage volume %s: %#v", name, &result.Result[0]) - updateResourceData(d, &result.Result[0]) - - return nil -} - -func resourceStorageVolumeUpdate(d *schema.ResourceData, meta interface{}) error { - sv := meta.(*OPCClient).StorageVolumes() - name := d.Get("name").(string) - description := d.Get("description").(string) - size := d.Get("size").(string) - tags := getTags(d) - - log.Printf("[DEBUG] Updating storage volume %s with size %s, 
description %s, tags %#v", name, size, description, tags) - err := sv.UpdateStorageVolume(name, size, description, tags) - - if err != nil { - return fmt.Errorf("Error updating storage volume %s: %s", name, err) - } - - log.Printf("[DEBUG] Waiting for updated storage volume %s to come online", name) - info, err := sv.WaitForStorageVolumeOnline(name, meta.(*OPCClient).MaxRetryTimeout) - if err != nil { - return fmt.Errorf("Error waiting for updated storage volume %s to come online: %s", name, err) - } - - log.Printf("[DEBUG] Updated storage volume %s: %#v", name, info) - updateResourceData(d, info) - return nil -} - -func resourceStorageVolumeDelete(d *schema.ResourceData, meta interface{}) error { - sv := meta.(*OPCClient).StorageVolumes() - name := d.Get("name").(string) - - sva := meta.(*OPCClient).StorageAttachments() - attachments, err := sva.GetStorageAttachmentsForVolume(name) - if err != nil { - return fmt.Errorf("Error retrieving storage attachments for volume %s: %s", name, err) - } - - attachmentsToCache := make([]storageAttachment, len(*attachments)) - for index, attachment := range *attachments { - log.Printf("[DEBUG] Deleting storage attachment %s for volume %s", attachment.Name, name) - sva.DeleteStorageAttachment(attachment.Name) - sva.WaitForStorageAttachmentDeleted(attachment.Name, meta.(*OPCClient).MaxRetryTimeout) - attachmentsToCache[index] = storageAttachment{ - index: attachment.Index, - instanceName: compute.InstanceNameFromString(attachment.InstanceName), - } - } - meta.(*OPCClient).storageAttachmentsByVolumeCache[name] = attachmentsToCache - - log.Printf("[DEBUG] Deleting storage volume %s", name) - err = sv.DeleteStorageVolume(name) - if err != nil { - return fmt.Errorf("Error deleting storage volume %s: %s", name, err) - } - - log.Printf("[DEBUG] Waiting for storage volume %s to finish deleting", name) - err = sv.WaitForStorageVolumeDeleted(name, meta.(*OPCClient).MaxRetryTimeout) - if err != nil { - return fmt.Errorf("Error waiting for storage volume %s to finish deleting: %s", name, err) - } - - log.Printf("[DEBUG] Deleted storage volume %s", name) - return nil -} diff --git a/builtin/providers/oracleopc/resource_storage_volume_test.go b/builtin/providers/oracleopc/resource_storage_volume_test.go deleted file mode 100644 index d168b5309..000000000 --- a/builtin/providers/oracleopc/resource_storage_volume_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package opc - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/resource" - "testing" -) - -func TestAccOPCStorageVolume_Basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: opcResourceCheck( - "opc_compute_storage_volume.test_volume", - testAccCheckStorageVolumeDestroyed), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccStorageVolumeBasic, - Check: resource.ComposeTestCheckFunc( - opcResourceCheck( - "opc_compute_storage_volume.test_volume", - testAccCheckStorageVolumeExists), - ), - }, - }, - }) -} - -func testAccCheckStorageVolumeExists(state *OPCResourceState) error { - sv := state.StorageVolumes() - volumeName := state.Attributes["name"] - - info, err := sv.GetStorageVolume(volumeName) - if err != nil { - return fmt.Errorf("Error retrieving state of volume %s: %s", volumeName, err) - } - - if len(info.Result) == 0 { - return fmt.Errorf("No info found for volume %s", volumeName) - } - - return nil -} - -func testAccCheckStorageVolumeDestroyed(state *OPCResourceState) error { - sv := 
state.StorageVolumes()
-
-	volumeName := state.Attributes["name"]
-
-	info, err := sv.GetStorageVolume(volumeName)
-	if err != nil {
-		return fmt.Errorf("Error retrieving state of volume %s: %s", volumeName, err)
-	}
-
-	if len(info.Result) != 0 {
-		return fmt.Errorf("Volume %s still exists", volumeName)
-	}
-
-	return nil
-}
-
-const testAccStorageVolumeBasic = `
-resource "opc_compute_storage_volume" "test_volume" {
-	size = "3g"
-	description = "My volume"
-	name = "test_volume_b"
-	tags = ["foo", "bar", "baz"]
-}
-`

From 15d93749e846a87a5cfada15074a69ae66e423cb Mon Sep 17 00:00:00 2001
From: Jake Champlin
Date: Tue, 4 Apr 2017 10:21:10 -0400
Subject: [PATCH 021/342] fix data_source_vnic_test

---
 .../providers/opc/data_source_virtual_nic_test.go | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/builtin/providers/opc/data_source_virtual_nic_test.go b/builtin/providers/opc/data_source_virtual_nic_test.go
index 6c52c06a3..eacf1bf82 100644
--- a/builtin/providers/opc/data_source_virtual_nic_test.go
+++ b/builtin/providers/opc/data_source_virtual_nic_test.go
@@ -50,7 +50,13 @@ resource "opc_compute_instance" "test" {
   }
 }
 
-data "opc_compute_vnic" "foo" {
-  name = "test-vnic-data-%d"
-}`, rInt, rInt, rInt, rInt)
+data "opc_compute_network_interface" "eth0" {
+  instance_name = "${opc_compute_instance.test.name}"
+  instance_id   = "${opc_compute_instance.test.id}"
+  interface     = "eth0"
+}
+
+data "opc_compute_vnic" "foo" {
+  name = "${data.opc_compute_network_interface.eth0.vnic}"
+}`, rInt, rInt, rInt)
 }

From 1f9cf2f4c3997a942f6177f99f2e542539168a3f Mon Sep 17 00:00:00 2001
From: Jake Champlin
Date: Tue, 4 Apr 2017 14:36:51 -0400
Subject: [PATCH 022/342] update tests, fix instances

---
 builtin/providers/opc/resource_instance.go   |  15 ++-
 builtin/providers/opc/resource_route_test.go |  60 +++++++++-
 .../providers/opc/resource_vnic_set_test.go  | 110 ++++++++++++++++--
 3 files changed, 168 insertions(+), 17 deletions(-)

diff --git a/builtin/providers/opc/resource_instance.go b/builtin/providers/opc/resource_instance.go
index 8b682cf96..0d509db25 100644
--- a/builtin/providers/opc/resource_instance.go
+++ b/builtin/providers/opc/resource_instance.go
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"log"
+	"strconv"
 	"strings"
 
 	"github.com/hashicorp/go-oracle-terraform/compute"
@@ -837,8 +838,20 @@ func readNetworkInterfaces(d *schema.ResourceData, ifaces map[string]compute.Net
 		return d.Set("networking_info", result)
 	}
 
-	for _, iface := range ifaces {
+	for index, iface := range ifaces {
 		res := make(map[string]interface{})
+		// The index returned from the SDK holds the full device_index from the instance.
+		// For users' convenience, we simply allow them to specify the integer equivalent of the device_index
+		// so a user could implement several network interfaces via `count`.
+		// Convert the full device_index `ethN` to `N` as an integer.
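+		// For example, the SDK key "eth0" yields index 0 and "eth3" yields index 3
+		// (illustrative names; the keys are assumed to always follow the `ethN` pattern).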
+ index := strings.TrimPrefix(index, "eth") + indexInt, err := strconv.Atoi(index) + if err != nil { + return err + } + res["index"] = indexInt + + // Set the proper attributes for this specific network interface if iface.DNS != nil { res["dns"] = iface.DNS } diff --git a/builtin/providers/opc/resource_route_test.go b/builtin/providers/opc/resource_route_test.go index c7d0d1f8a..5ec9aa69b 100644 --- a/builtin/providers/opc/resource_route_test.go +++ b/builtin/providers/opc/resource_route_test.go @@ -10,7 +10,6 @@ import ( "github.com/hashicorp/terraform/terraform" ) -// TODO (@jake): Properly create a vNIC Set once instances are finished func TestAccOPCRoute_Basic(t *testing.T) { rInt := acctest.RandInt() resName := "opc_compute_route.test" @@ -41,13 +40,37 @@ func TestAccOPCRoute_Basic(t *testing.T) { }) } -// TODO (@jake): Properly create a vNIC Set once instances are finished func testAccOPCRouteConfig_Basic(rInt int) string { return fmt.Sprintf(` +resource "opc_compute_ip_network" "foo" { + name = "testing-route-%d" + description = "testing-route" + ip_address_prefix = "10.1.14.0/24" +} + +resource "opc_compute_instance" "foo" { + name = "test-route-%d" + label = "testing" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + networking_info { + index = 0 + ip_network = "${opc_compute_ip_network.foo.id}" + vnic = "test-vnic-set-%d" + shared_network = false + } +} + +data "opc_compute_network_interface" "foo" { + instance_name = "${opc_compute_instance.foo.name}" + instance_id = "${opc_compute_instance.foo.id}" + interface = "eth0" +} + resource "opc_compute_vnic_set" "test" { name = "route-test-%d" description = "route-testing-%d" - virtual_nics = ["jake-manual_eth1"] + virtual_nics = ["${data.opc_compute_network_interface.foo.vnic}"] } resource "opc_compute_route" "test" { @@ -56,15 +79,40 @@ resource "opc_compute_route" "test" { admin_distance = 1 ip_address_prefix = "10.0.12.0/24" next_hop_vnic_set = "${opc_compute_vnic_set.test.name}" -}`, rInt, rInt, rInt, rInt) +}`, rInt, rInt, rInt, rInt, rInt, rInt, rInt) } func testAccOPCRouteConfig_BasicUpdate(rInt int) string { return fmt.Sprintf(` +resource "opc_compute_ip_network" "foo" { + name = "testing-route-%d" + description = "testing-route" + ip_address_prefix = "10.1.14.0/24" +} + +resource "opc_compute_instance" "foo" { + name = "test-route-%d" + label = "testing" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + networking_info { + index = 0 + ip_network = "${opc_compute_ip_network.foo.id}" + vnic = "test-vnic-set-%d" + shared_network = false + } +} + +data "opc_compute_network_interface" "foo" { + instance_name = "${opc_compute_instance.foo.name}" + instance_id = "${opc_compute_instance.foo.id}" + interface = "eth0" +} + resource "opc_compute_vnic_set" "test" { name = "route-test-%d" description = "route-testing-%d" - virtual_nics = ["jake-manual_eth1"] + virtual_nics = ["${data.opc_compute_network_interface.foo.vnic}"] } resource "opc_compute_route" "test" { @@ -73,7 +121,7 @@ resource "opc_compute_route" "test" { admin_distance = 2 ip_address_prefix = "10.0.14.0/24" next_hop_vnic_set = "${opc_compute_vnic_set.test.name}" -}`, rInt, rInt, rInt, rInt) +}`, rInt, rInt, rInt, rInt, rInt, rInt, rInt) } func testAccOPCCheckRouteExists(s *terraform.State) error { diff --git a/builtin/providers/opc/resource_vnic_set_test.go b/builtin/providers/opc/resource_vnic_set_test.go index 9797906c7..63320f5d3 100644 --- a/builtin/providers/opc/resource_vnic_set_test.go +++ 
b/builtin/providers/opc/resource_vnic_set_test.go @@ -10,7 +10,6 @@ import ( "github.com/hashicorp/terraform/terraform" ) -// TODO (@jake): Add actual vnics after instance resource is finalized func TestAccOPCVNICSet_Basic(t *testing.T) { rInt := acctest.RandInt() rName := fmt.Sprintf("testing-acc-%d", rInt) @@ -22,7 +21,7 @@ func TestAccOPCVNICSet_Basic(t *testing.T) { CheckDestroy: testAccOPCCheckVNICSetDestroy, Steps: []resource.TestStep{ { - Config: testAccVnicSetBasic(rName, rDesc), + Config: testAccVnicSetBasic(rName, rDesc, rInt), Check: resource.ComposeTestCheckFunc( testAccOPCCheckVNICSetExists, resource.TestCheckResourceAttr( @@ -36,7 +35,7 @@ func TestAccOPCVNICSet_Basic(t *testing.T) { ), }, { - Config: testAccVnicSetBasic_Update(rName, rDesc), + Config: testAccVnicSetBasic_Update(rName, rDesc, rInt), Check: resource.ComposeTestCheckFunc( testAccOPCCheckVNICSetExists, resource.TestCheckResourceAttr( @@ -91,23 +90,114 @@ func testAccOPCCheckVNICSetDestroy(s *terraform.State) error { return nil } -// TODO (@jake): add actual vnics once instance resource is finalized -func testAccVnicSetBasic(rName, rDesc string) string { +func testAccVnicSetBasic(rName, rDesc string, rInt int) string { return fmt.Sprintf(` +resource "opc_compute_ip_network" "foo" { + name = "testing-vnic-set-%d" + description = "testing-vnic-set" + ip_address_prefix = "10.1.14.0/24" +} + +resource "opc_compute_ip_network" "bar" { + name = "testing-vnic-set2-%d" + description = "testing-vnic-set2" + ip_address_prefix = "10.1.15.0/24" +} + +resource "opc_compute_instance" "foo" { + name = "test-vnic-set-%d" + label = "testing" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + networking_info { + index = 0 + ip_network = "${opc_compute_ip_network.foo.id}" + vnic = "test-vnic-set-%d" + shared_network = false + } + networking_info { + index = 1 + ip_network = "${opc_compute_ip_network.bar.id}" + vnic = "test-vnic-set2-%d" + shared_network = false + } +} + +data "opc_compute_network_interface" "foo" { + instance_name = "${opc_compute_instance.foo.name}" + instance_id = "${opc_compute_instance.foo.id}" + interface = "eth0" +} + +data "opc_compute_network_interface" "bar" { + instance_name = "${opc_compute_instance.foo.name}" + instance_id = "${opc_compute_instance.foo.id}" + interface = "eth1" +} + resource "opc_compute_vnic_set" "test" { name = "%s" description = "%s" tags = ["tag1", "tag2"] - virtual_nics = ["jake-manual_eth1", "jake_manual_two_eth1"] -}`, rName, rDesc) + virtual_nics = [ + "${data.opc_compute_network_interface.foo.vnic}", + "${data.opc_compute_network_interface.bar.vnic}", + ] +}`, rInt, rInt, rInt, rInt, rInt, rName, rDesc) } -func testAccVnicSetBasic_Update(rName, rDesc string) string { +func testAccVnicSetBasic_Update(rName, rDesc string, rInt int) string { return fmt.Sprintf(` +resource "opc_compute_ip_network" "foo" { + name = "testing-vnic-set-%d" + description = "testing-vnic-set" + ip_address_prefix = "10.1.14.0/24" +} + +resource "opc_compute_ip_network" "bar" { + name = "testing-vnic-set2-%d" + description = "testing-vnic-set2" + ip_address_prefix = "10.1.15.0/24" +} + +resource "opc_compute_instance" "foo" { + name = "test-vnic-set-%d" + label = "testing" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + networking_info { + index = 0 + ip_network = "${opc_compute_ip_network.foo.id}" + vnic = "test-vnic-set-%d" + shared_network = false + } + networking_info { + index = 1 + ip_network = "${opc_compute_ip_network.bar.id}" + vnic 
= "test-vnic-set2-%d" + shared_network = false + } +} + +data "opc_compute_network_interface" "foo" { + instance_name = "${opc_compute_instance.foo.name}" + instance_id = "${opc_compute_instance.foo.id}" + interface = "eth0" +} + +data "opc_compute_network_interface" "bar" { + instance_name = "${opc_compute_instance.foo.name}" + instance_id = "${opc_compute_instance.foo.id}" + interface = "eth1" +} + resource "opc_compute_vnic_set" "test" { name = "%s" description = "%s-updated" - virtual_nics = ["jake-manual_eth1", "jake_manual_two_eth1"] tags = ["tag1"] -}`, rName, rDesc) + virtual_nics = [ + "${data.opc_compute_network_interface.foo.vnic}", + "${data.opc_compute_network_interface.bar.vnic}", + ] +}`, rInt, rInt, rInt, rInt, rInt, rName, rDesc) } From d05af76607fee831ee7bf3f8057abdf72ae34de2 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Tue, 4 Apr 2017 16:28:11 -0400 Subject: [PATCH 023/342] add image_list_entry resource --- builtin/providers/opc/provider.go | 1 + .../opc/resource_image_list_entry.go | 160 +++++++++++++++++ .../opc/resource_image_list_entry_test.go | 161 ++++++++++++++++++ .../opc/resource_security_protocol.go | 1 + ...opc_compute_image_list_entry.html.markdown | 52 ++++++ .../d/opc_compute_vnic.html.markdown | 36 ---- .../providers/oracleopc/index.html.markdown | 55 ------ .../r/opc_compute_instance.html.markdown | 68 -------- .../opc_compute_ip_association.html.markdown | 31 ---- .../opc_compute_ip_reservation.html.markdown | 33 ---- ...compute_security_application.html.markdown | 39 ----- ...compute_security_association.html.markdown | 29 ---- ...opc_compute_security_ip_list.html.markdown | 28 --- .../r/opc_compute_security_list.html.markdown | 33 ---- .../r/opc_compute_security_rule.html.markdown | 46 ----- .../r/opc_compute_ssh_key.html.markdown | 32 ---- .../opc_compute_storage_volume.html.markdown | 49 ------ website/source/layouts/opc.erb | 3 + website/source/layouts/oracleopc.erb | 59 ------- 19 files changed, 378 insertions(+), 538 deletions(-) create mode 100644 builtin/providers/opc/resource_image_list_entry.go create mode 100644 builtin/providers/opc/resource_image_list_entry_test.go create mode 100644 website/source/docs/providers/opc/r/opc_compute_image_list_entry.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/d/opc_compute_vnic.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/index.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown delete mode 100644 website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown delete mode 100644 website/source/layouts/oracleopc.erb diff --git a/builtin/providers/opc/provider.go 
b/builtin/providers/opc/provider.go index 46b4fcc95..e1ac86dee 100644 --- a/builtin/providers/opc/provider.go +++ b/builtin/providers/opc/provider.go @@ -53,6 +53,7 @@ func Provider() terraform.ResourceProvider { "opc_compute_ip_network": resourceOPCIPNetwork(), "opc_compute_acl": resourceOPCACL(), "opc_compute_image_list": resourceOPCImageList(), + "opc_compute_image_list_entry": resourceOPCImageListEntry(), "opc_compute_instance": resourceInstance(), "opc_compute_ip_address_reservation": resourceOPCIPAddressReservation(), "opc_compute_ip_association": resourceOPCIPAssociation(), diff --git a/builtin/providers/opc/resource_image_list_entry.go b/builtin/providers/opc/resource_image_list_entry.go new file mode 100644 index 000000000..8262657b5 --- /dev/null +++ b/builtin/providers/opc/resource_image_list_entry.go @@ -0,0 +1,160 @@ +package opc + +import ( + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/structure" + "github.com/hashicorp/terraform/helper/validation" +) + +func resourceOPCImageListEntry() *schema.Resource { + return &schema.Resource{ + Create: resourceOPCImageListEntryCreate, + Read: resourceOPCImageListEntryRead, + Delete: resourceOPCImageListEntryDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "machine_images": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "version": { + Type: schema.TypeInt, + ForceNew: true, + Required: true, + }, + "attributes": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + ValidateFunc: validation.ValidateJsonString, + DiffSuppressFunc: structure.SuppressJsonDiff, + }, + "uri": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceOPCImageListEntryCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).ImageListEntries() + + name := d.Get("name").(string) + machineImages := expandOPCImageListEntryMachineImages(d) + version := d.Get("version").(int) + + createInput := &compute.CreateImageListEntryInput{ + Name: name, + MachineImages: machineImages, + Version: version, + } + + if v, ok := d.GetOk("attributes"); ok { + attributesString := v.(string) + attributes, err := structure.ExpandJsonFromString(attributesString) + if err != nil { + return err + } + + createInput.Attributes = attributes + } + + _, err := client.CreateImageListEntry(createInput) + if err != nil { + return err + } + + id := generateOPCImageListEntryID(name, version) + d.SetId(id) + return resourceOPCImageListEntryRead(d, meta) +} + +func resourceOPCImageListEntryRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).ImageListEntries() + + name, version, err := parseOPCImageListEntryID(d.Id()) + if err != nil { + return err + } + + getInput := compute.GetImageListEntryInput{ + Name: *name, + Version: *version, + } + getResult, err := client.GetImageListEntry(&getInput) + if err != nil { + return err + } + + attrs, err := structure.FlattenJsonToString(getResult.Attributes) + if err != nil { + return err + } + + d.Set("name", name) + d.Set("machine_images", getResult.MachineImages) + d.Set("version", getResult.Version) + d.Set("attributes", attrs) + d.Set("uri", getResult.Uri) + + return nil +} + +func 
resourceOPCImageListEntryDelete(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*compute.Client).ImageListEntries()
+
+	name, version, err := parseOPCImageListEntryID(d.Id())
+	if err != nil {
+		return err
+	}
+
+	deleteInput := &compute.DeleteImageListEntryInput{
+		Name:    *name,
+		Version: *version,
+	}
+	err = client.DeleteImageListEntry(deleteInput)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func parseOPCImageListEntryID(id string) (*string, *int, error) {
+	s := strings.Split(id, "|")
+	// Guard against malformed IDs (e.g. from `terraform import`) to avoid an index panic.
+	if len(s) != 2 {
+		return nil, nil, fmt.Errorf("Invalid Image List Entry ID: %s", id)
+	}
+	name, versionString := s[0], s[1]
+	version, err := strconv.Atoi(versionString)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return &name, &version, nil
+}
+
+func expandOPCImageListEntryMachineImages(d *schema.ResourceData) []string {
+	machineImages := []string{}
+	for _, i := range d.Get("machine_images").([]interface{}) {
+		machineImages = append(machineImages, i.(string))
+	}
+	return machineImages
+}
+
+func generateOPCImageListEntryID(name string, version int) string {
+	return fmt.Sprintf("%s|%d", name, version)
+}
diff --git a/builtin/providers/opc/resource_image_list_entry_test.go b/builtin/providers/opc/resource_image_list_entry_test.go
new file mode 100644
index 000000000..4d76bd0a6
--- /dev/null
+++ b/builtin/providers/opc/resource_image_list_entry_test.go
@@ -0,0 +1,161 @@
+package opc
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/go-oracle-terraform/compute"
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccOPCImageListEntry_Basic(t *testing.T) {
+	ri := acctest.RandInt()
+	config := fmt.Sprintf(testAccImageListEntry_basic, ri)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckImageListEntryDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: config,
+				Check:  testAccCheckImageListEntryExists,
+			},
+		},
+	})
+}
+
+func TestAccOPCImageListEntry_Complete(t *testing.T) {
+	ri := acctest.RandInt()
+	config := fmt.Sprintf(testAccImageListEntry_Complete, ri)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckImageListEntryDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: config,
+				Check:  testAccCheckImageListEntryExists,
+			},
+		},
+	})
+}
+
+func TestAccOPCImageListEntry_CompleteExpanded(t *testing.T) {
+	ri := acctest.RandInt()
+	config := fmt.Sprintf(testAccImageListEntry_CompleteExpanded, ri)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckImageListEntryDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: config,
+				Check:  testAccCheckImageListEntryExists,
+			},
+		},
+	})
+}
+
+func testAccCheckImageListEntryExists(s *terraform.State) error {
+	client := testAccProvider.Meta().(*compute.Client).ImageListEntries()
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "opc_compute_image_list_entry" {
+			continue
+		}
+
+		name, version, err := parseOPCImageListEntryID(rs.Primary.ID)
+		if err != nil {
+			return fmt.Errorf("Error parsing the Image List ID: '%s': %+v", rs.Primary.ID, err)
+		}
+
+		input := compute.GetImageListEntryInput{
+			Name:    *name,
+			Version: *version,
+		}
+
+		if _, err := client.GetImageListEntry(&input); err != nil {
+			return fmt.Errorf("Error retrieving state of Image List Entry %s: %s", input.Name, err)
+		}
+	}
+
+	return nil
+}
+
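+// TestOPCImageListEntryID_RoundTrip is a minimal illustrative sketch (the name
+// and version here are arbitrary) showing that generateOPCImageListEntryID and
+// parseOPCImageListEntryID are inverses around the "|" separator used in the
+// resource ID.
+func TestOPCImageListEntryID_RoundTrip(t *testing.T) {
+	id := generateOPCImageListEntryID("my-image-list", 3)
+	if id != "my-image-list|3" {
+		t.Fatalf("unexpected composite ID: %s", id)
+	}
+	name, version, err := parseOPCImageListEntryID(id)
+	if err != nil {
+		t.Fatalf("error parsing composite ID: %s", err)
+	}
+	if *name != "my-image-list" || *version != 3 {
+		t.Fatalf("unexpected parse result: %s, %d", *name, *version)
+	}
+}
+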
+func testAccCheckImageListEntryDestroy(s *terraform.State) error {
+	client := testAccProvider.Meta().(*compute.Client).ImageListEntries()
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "opc_compute_image_list_entry" {
+			continue
+		}
+
+		name, version, err := parseOPCImageListEntryID(rs.Primary.ID)
+		if err != nil {
+			return fmt.Errorf("Error parsing the Image List ID: %+v", err)
+		}
+
+		input := compute.GetImageListEntryInput{
+			Name:    *name,
+			Version: *version,
+		}
+		if info, err := client.GetImageListEntry(&input); err == nil {
+			return fmt.Errorf("Image List Entry %s still exists: %#v", input.Name, info)
+		}
+	}
+
+	return nil
+}
+
+var testAccImageListEntry_basic = `
+resource "opc_compute_image_list" "test" {
+  name        = "test-acc-image-list-entry-basic-%d"
+  description = "Acceptance Test TestAccOPCImageListEntry_Basic"
+  default     = 1
+}
+
+resource "opc_compute_image_list_entry" "test" {
+  name = "${opc_compute_image_list.test.name}"
+  machine_images = [ "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" ]
+  version = 1
+}
+`
+
+var testAccImageListEntry_Complete = `
+resource "opc_compute_image_list" "test" {
+  name        = "test-acc-image-list-entry-basic-%d"
+  description = "Acceptance Test TestAccOPCImageListEntry_Basic"
+  default     = 1
+}
+
+resource "opc_compute_image_list_entry" "test" {
+  name = "${opc_compute_image_list.test.name}"
+  machine_images = [ "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" ]
+  attributes = "{\"hello\":\"world\"}"
+  version = 1
+}
+`
+
+var testAccImageListEntry_CompleteExpanded = `
+resource "opc_compute_image_list" "test" {
+  name        = "test-acc-image-list-entry-basic-%d"
+  description = "Acceptance Test TestAccOPCImageListEntry_Basic"
+  default     = 1
+}
+
+resource "opc_compute_image_list_entry" "test" {
+  name = "${opc_compute_image_list.test.name}"
+  machine_images = [ "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" ]
+  attributes = <<JSON
+{
+  "hello": "world"
+}
+JSON
+  version = 1
+}
+`

[diff content lost to extraction here: builtin/providers/opc/resource_security_protocol.go and several website documentation pages; the deleted opc_compute_instance page resumes below.]

diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_instance.html.markdown
deleted file mode 100644
-~> **Caution:** The ``opc_compute_instance`` resource can completely delete your
-instance just as easily as it can create it. To avoid costly accidents,
-consider setting
-[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy)
-on your instance resources as an extra safety measure.
-
-## Example Usage
-
-```
-resource "opc_compute_instance" "test_instance" {
-  name = "test"
-  label = "test"
-  shape = "oc3"
-  imageList = "/oracle/public/oel_6.4_2GB_v1"
-  sshKeys = ["${opc_compute_ssh_key.key1.name}"]
-  attributes = "{\"foo\":\"bar\"}"
-  storage = [{
-    index = 1
-    volume = "${opc_compute_storage_volume.test_volume.name}"
-  },
-  {
-    index = 2
-    volume = "${opc_compute_storage_volume.test_volume2.name}"
-  }]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the instance. This need not be unique, as each instance is assigned a separate
-computed `opcId`.
-
-* `shape` - (Required) The shape of the instance, e.g. `oc4`.
-
-* `imageList` - (Optional) The imageList of the instance, e.g. `/oracle/public/oel_6.4_2GB_v1`
-
-* `label` - (Optional) The label to apply to the instance.
-
-* `ip` - (Computed) The internal IP address assigned to the instance.
-
-* `opcId` - (Computed) The interned ID assigned to the instance.
-
-* `sshKeys` - (Optional) The names of the SSH Keys that can be used to log into the instance.
-
-* `attributes` - (Optional) An arbitrary JSON-formatted collection of attributes which is made available to the instance.
- -* `vcable` - (Computed) The ID of the instance's VCable, which is used to associate it with reserved IP addresses and -add it to Security Lists. - -* `storage` - (Optional) A set of zero or more storage volumes to attach to the instance. Each volume has two arguments: -`index`, which is the volume's index in the instance's list of mounted volumes, and `name`, which is the name of the -storage volume to mount. - -* `bootOrder` - (Optional) The index number of the bootable storage volume that should be used to boot the instance. e.g. `[ 1 ]`. If you specify both `bootOrder` and `imageList`, the imagelist attribute is ignored. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown deleted file mode 100644 index 2518b2df1..000000000 --- a/website/source/docs/providers/oracleopc/r/opc_compute_ip_association.html.markdown +++ /dev/null @@ -1,31 +0,0 @@ ---- -layout: "oracleopc" -page_title: "Oracle: opc_compute_ip_association" -sidebar_current: "docs-oracleopc-resource-ip-association" -description: |- - Creates and manages an IP association in an OPC identity domain. ---- - -# opc\_compute\_ip\_association - -The ``opc_compute_ip_association`` resource creates and manages an association between an IP address and an instance in -an OPC identity domain. - -## Example Usage - -``` -resource "opc_compute_ip_association" "instance1_reservation1" { - vcable = "${opc_compute_instance.test_instance.vcable}" - parentpool = "ipreservation:${opc_compute_ip_reservation.reservation1.name}" -} -``` - -## Argument Reference - -The following arguments are supported: - -* `vcable` - (Required) The vcable of the instance to associate the IP address with. - -* `parentpool` - (Required) The pool from which to take an IP address. To associate a specific reserved IP address, use -the prefix `ipreservation:` followed by the name of the IP reservation. To allocate an IP address from a pool, use the -prefix `ippool:`, e.g. `ippool:/oracle/public/ippool`. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown deleted file mode 100644 index 44b70cc0f..000000000 --- a/website/source/docs/providers/oracleopc/r/opc_compute_ip_reservation.html.markdown +++ /dev/null @@ -1,33 +0,0 @@ ---- -layout: "oracleopc" -page_title: "Oracle: opc_compute_ip_reservation" -sidebar_current: "docs-oracleopc-resource-ip-reservation" -description: |- - Creates and manages an IP reservation in an OPC identity domain. ---- - -# opc\_compute\_ip\_reservation - -The ``opc_compute_ip_reservation`` resource creates and manages an IP reservation in an OPC identity domain. - -## Example Usage - -``` -resource "opc_compute_ip_reservation" "reservation1" { - parentpool = "/oracle/public/ippool" - permanent = true - tags = [] -} -``` - -## Argument Reference - -The following arguments are supported: - -* `parentpool` - (Required) The pool from which to allocate the IP address. - -* `permanent` - (Required) Whether the IP address remains reserved even when it is no longer associated with an instance -(if true), or may be returned to the pool and replaced with a different IP address when an instance is restarted, or -deleted and recreated (if false). - -* `tags` - (Optional) List of tags that may be applied to the IP reservation. 
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown deleted file mode 100644 index 94760f082..000000000 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_application.html.markdown +++ /dev/null @@ -1,39 +0,0 @@ ---- -layout: "oracleopc" -page_title: "Oracle: opc_compute_security_application" -sidebar_current: "docs-oracleopc-resource-security-application" -description: |- - Creates and manages a security application in an OPC identity domain. ---- - -# opc\_compute\_security\_application - -The ``opc_compute_security_application`` resource creates and manages a security application in an OPC identity domain. - -## Example Usage - -``` -resource "opc_compute_security_application" "tomcat" { - name = "tomcat" - protocol = "tcp" - dport = "8080" -} -``` - -## Argument Reference - -The following arguments are supported: - -* `name` - (Required) The unique (within the identity domain) name of the application - -* `protocol` - (Required) The protocol to enable for this application. Must be either one of -`tcp`, `udp`, `icmp`, `igmp`, `ipip`, `rdp`, `esp`, `ah`, `gre`, `icmpv6`, `ospf`, `pim`, `sctp`, `mplsip` or `all`, or -the corresponding integer in the range 0-254 from the list of [assigned protocol numbers](http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) - -* `dport` - (Required) The port, or range of ports, to enable for this application, e.g `8080`, `6000-7000`. - -* `icmptype` - (Optional) The ICMP type to enable for this application, if the `protocol` is `icmp`. Must be one of -`echo`, `reply`, `ttl`, `traceroute`, `unreachable`. - -* `icmpcode` - (Optional) The ICMP code to enable for this application, if the `protocol` is `icmp`. Must be one of -`network`, `host`, `protocol`, `port`, `df`, `admin`. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown deleted file mode 100644 index 49207c879..000000000 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_association.html.markdown +++ /dev/null @@ -1,29 +0,0 @@ ---- -layout: "oracleopc" -page_title: "Oracle: opc_compute_security_association" -sidebar_current: "docs-oracleopc-resource-security-association" -description: |- - Creates and manages a security association in an OPC identity domain. ---- - -# opc\_compute\_security\_association - -The ``opc_compute_security_association`` resource creates and manages an association between an instance and a security -list in an OPC identity domain. - -## Example Usage - -``` -resource "opc_compute_security_association" "test_instance_sec_list_1" { - vcable = "${opc_compute_instance.test_instance.vcable}" - seclist = "${opc_compute_security_list.sec_list1.name}" -} -``` - -## Argument Reference - -The following arguments are supported: - -* `vcable` - (Required) The `vcable` of the instance to associate to the security list. - -* `seclist` - (Required) The name of the security list to associate the instance to. 
diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown deleted file mode 100644 index 62f40d839..000000000 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_ip_list.html.markdown +++ /dev/null @@ -1,28 +0,0 @@ ---- -layout: "oracleopc" -page_title: "Oracle: opc_compute_security_ip_list" -sidebar_current: "docs-oracleopc-resource-security-ip-list" -description: |- - Creates and manages a security IP list in an OPC identity domain. ---- - -# opc\_compute\_security\_ip\_list - -The ``opc_compute_security_ip_list`` resource creates and manages a security IP list in an OPC identity domain. - -## Example Usage - -``` -resource "opc_compute_security_ip_list" "sec_ip_list1" { - name = "sec-ip-list1" - ip_entries = ["217.138.34.4"] -} -``` - -## Argument Reference - -The following arguments are supported: - -* `name` - (Required) The unique (within the identity domain) name of the security IP list. - -* `ip_entries` - (Required) The IP addresses to include in the list. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown deleted file mode 100644 index 64547a41e..000000000 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_list.html.markdown +++ /dev/null @@ -1,33 +0,0 @@ ---- -layout: "oracleopc" -page_title: "Oracle: opc_compute_security_list" -sidebar_current: "docs-oracleopc-resource-security-list" -description: |- - Creates and manages a security list in an OPC identity domain. ---- - -# opc\_compute\_security\_list - -The ``opc_compute_security_list`` resource creates and manages a security list in an OPC identity domain. - -## Example Usage - -``` -resource "opc_compute_security_list" "sec_list1" { - name = "sec-list-1" - policy = "permit" - outbound_cidr_policy = "deny" -} -``` - -## Argument Reference - -The following arguments are supported: - -* `name` - (Required) The unique (within the identity domain) name of the security list. - -* `policy` - (Required) The policy to apply to instances associated with this list. Must be one of `permit`, -`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent). - -* `output_cidr_policy` - (Required) The policy for outbound traffic from the security list.Must be one of `permit`, -`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent). diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown deleted file mode 100644 index 6497b0265..000000000 --- a/website/source/docs/providers/oracleopc/r/opc_compute_security_rule.html.markdown +++ /dev/null @@ -1,46 +0,0 @@ ---- -layout: "oracleopc" -page_title: "Oracle: opc_compute_security_rule" -sidebar_current: "docs-oracleopc-resource-security-rule" -description: |- - Creates and manages a security rule in an OPC identity domain. ---- - -# opc\_compute\_ip\_reservation - -The ``opc_compute_security_rule`` resource creates and manages a security rule in an OPC identity domain, which joins -together a source security list (or security IP list), a destination security list (or security IP list), and a security -application. 
- -## Example Usage - -``` -resource "opc_compute_security_rule" "test_rule" { - name = "test" - source_list = "seclist:${opc_compute_security_list.sec-list1.name}" - destination_list = "seciplist:${opc_compute_security_ip_list.sec-ip-list1.name}" - action = "permit" - application = "${opc_compute_security_application.spring-boot.name}" - disabled = false -} -``` - -## Argument Reference - -The following arguments are supported: - -* `name` - (Required) The unique (within the identity domain) name of the security rule. - -* `source_list` - (Required) The source security list (prefixed with `seclist:`), or security IP list (prefixed with -`seciplist:`). - - * `destination_list` - (Required) The destination security list (prefixed with `seclist:`), or security IP list (prefixed with - `seciplist:`). - -* `application` - (Required) The name of the application to which the rule applies. - -* `action` - (Required) Whether to `permit`, `refuse` or `deny` packets to which this rule applies. This will ordinarily -be `permit`. - -* `disabled` - (Required) Whether to disable this security rule. This is useful if you want to temporarily disable a rule -without removing it outright from your Terraform resource definition. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown deleted file mode 100644 index ff85467d8..000000000 --- a/website/source/docs/providers/oracleopc/r/opc_compute_ssh_key.html.markdown +++ /dev/null @@ -1,32 +0,0 @@ ---- -layout: "oracleopc" -page_title: "Oracle: opc_compute_ssh_key" -sidebar_current: "docs-oracleopc-resource-ssh-key" -description: |- - Creates and manages an SSH key in an OPC identity domain. ---- - -# opc\_compute\_ssh_key - -The ``opc_compute_ssh_key`` resource creates and manages an SSH key in an OPC identity domain. - -## Example Usage - -``` -resource "opc_compute_ssh_key" "%s" { - name = "test-key" - key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIk..." - enabled = true -} -``` - -## Argument Reference - -The following arguments are supported: - -* `name` - (Required) The unique (within this identity domain) name of the SSH key. - -* `key` - (Required) The SSH key itself - -* `enabled` - (Required) Whether or not the key is enabled. This is useful if you want to temporarily disable an SSH key, -without removing it entirely from your Terraform resource definition. diff --git a/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown deleted file mode 100644 index 4b30b59ed..000000000 --- a/website/source/docs/providers/oracleopc/r/opc_compute_storage_volume.html.markdown +++ /dev/null @@ -1,49 +0,0 @@ ---- -layout: "oracleopc" -page_title: "Oracle: opc_compute_storage_volume" -sidebar_current: "docs-oracleopc-resource-storage-volume" -description: |- - Creates and manages a storage volume in an OPC identity domain. ---- - -# opc\_compute\_storage\_volume - -The ``opc_compute_storage_volume`` resource creates and manages a storage volume in an OPC identity domain. - -~> **Caution:** The ``opc_compute_storage_volume`` resource can completely delete your -storage volume just as easily as it can create it. To avoid costly accidents, -consider setting -[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy) -on your storage volume resources as an extra safety measure. 
- -## Example Usage - -``` -resource "opc_compute_storage_volume" "test_volume" { - size = "3g" - description = "My storage volume" - name = "test_volume_a" - tags = ["xyzzy", "quux"] -} -``` - -## Argument Reference - -The following arguments are supported: - -* `name` - (Required) The unique (within this identity domain) name of the storage volume. - -* `size` - (Required) The size of the storage instance. - -* `description` - (Optional) A description of the storage volume. - -* `tags` - (Optional) A list of tags to apply to the storage volume. - -* `bootableImage` - (Optional) The name of the bootable image the storage volume is loaded with. - -* `bootableImageVersion` - (Optional) The version of the bootable image specified in `bootableImage` to use. - -* `snapshot` - (Optional) The snapshot to initialise the storage volume with. This has two nested properties: `name`, -for the name of the snapshot to use, and `account` for the name of the snapshot account to use. - -* `snapshotId` - (Optional) The id of the snapshot to initialise the storage volume with. diff --git a/website/source/layouts/opc.erb b/website/source/layouts/opc.erb index 8c9bfd793..f27a27e8b 100644 --- a/website/source/layouts/opc.erb +++ b/website/source/layouts/opc.erb @@ -31,6 +31,9 @@ > opc_compute_image_list + > + opc_compute_image_list_entry + > opc_compute_instance diff --git a/website/source/layouts/oracleopc.erb b/website/source/layouts/oracleopc.erb deleted file mode 100644 index a9d9579f8..000000000 --- a/website/source/layouts/oracleopc.erb +++ /dev/null @@ -1,59 +0,0 @@ -<% wrap_layout :inner do %> -<% content_for :sidebar do %> - -<% end %> - -<%= yield %> -<% end %> From 9062ddded331f2d72bb9d475e3f6632197508c29 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Wed, 5 Apr 2017 16:40:05 -0400 Subject: [PATCH 024/342] Add Storage Volumes to instances - Adds storage volumes to instances - Updates go-oracle-terraform vendor - Adds clarification between ip/shared network in docs - make 'bootable.image_list' a required parameter in the storage_volume resource - Add storage volume test + docs --- builtin/providers/opc/resource_instance.go | 48 +++++++++++++----- .../providers/opc/resource_instance_test.go | 50 ++++++++++++++++++- .../providers/opc/resource_storage_volume.go | 2 +- .../go-oracle-terraform/compute/instances.go | 13 +++++ vendor/vendor.json | 6 +-- .../opc/r/opc_compute_instance.html.markdown | 22 ++++++++ ...mpute_ip_address_reservation.html.markdown | 4 +- .../opc_compute_ip_association.html.markdown | 4 +- .../opc_compute_ip_reservation.html.markdown | 4 +- .../opc_compute_storage_volume.html.markdown | 2 +- 10 files changed, 130 insertions(+), 25 deletions(-) diff --git a/builtin/providers/opc/resource_instance.go b/builtin/providers/opc/resource_instance.go index 0d509db25..2744edd5a 100644 --- a/builtin/providers/opc/resource_instance.go +++ b/builtin/providers/opc/resource_instance.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" ) func resourceInstance() *schema.Resource { @@ -232,9 +233,10 @@ func resourceInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "index": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1, 10), }, "volume": { Type: schema.TypeString, @@ 
-337,7 +339,7 @@ func resourceInstance() *schema.Resource { Computed: true, }, - "vcable_id": { + "vcable": { Type: schema.TypeString, Computed: true, }, @@ -401,8 +403,10 @@ func resourceInstanceCreate(d *schema.ResourceData, meta interface{}) error { input.SSHKeys = sshKeys } - // TODO Add storage things - //storage := getStorageAttachments(d) + storage := getStorageAttachments(d) + if len(storage) > 0 { + input.Storage = storage + } if tags := getStringList(d, "tags"); len(tags) > 0 { input.Tags = tags @@ -476,7 +480,9 @@ func updateInstanceAttributes(d *schema.ResourceData, instance *compute.Instance return err } - // TODO Set Storage + if err := readStorageAttachments(d, instance.Storage); err != nil { + return err + } if err := setStringList(d, "tags", instance.Tags); err != nil { return err @@ -512,7 +518,7 @@ func updateInstanceAttributes(d *schema.ResourceData, instance *compute.Instance return err } - d.Set("vcable_id", instance.VCableID) + d.Set("vcable", instance.VCableID) d.Set("virtio", instance.Virtio) d.Set("vnc_address", instance.VNC) @@ -537,10 +543,8 @@ func resourceInstanceDelete(d *schema.ResourceData, meta interface{}) error { return nil } -// TODO Uncomment this when working on storage -/* -func getStorageAttachments(d *schema.ResourceData) []compute.StorageAttachment { - storageAttachments := []compute.StorageAttachment{} +func getStorageAttachments(d *schema.ResourceData) []compute.StorageAttachmentInput { + storageAttachments := []compute.StorageAttachmentInput{} storage := d.Get("storage").(*schema.Set) for _, i := range storage.List() { attrs := i.(map[string]interface{}) @@ -550,7 +554,7 @@ func getStorageAttachments(d *schema.ResourceData) []compute.StorageAttachment { }) } return storageAttachments -}*/ +} // Parses instance_attributes from a string to a map[string]interface and returns any errors. 
func getInstanceAttributes(d *schema.ResourceData) (map[string]interface{}, error) { @@ -895,3 +899,21 @@ func readNetworkInterfaces(d *schema.ResourceData, ifaces map[string]compute.Net return d.Set("networking_info", result) } + +// Flattens the returned slice of storage attachments to a map +func readStorageAttachments(d *schema.ResourceData, attachments []compute.StorageAttachment) error { + result := make([]map[string]interface{}, 0) + + if attachments == nil || len(attachments) == 0 { + return d.Set("storage", nil) + } + + for _, attachment := range attachments { + res := make(map[string]interface{}) + res["index"] = attachment.Index + res["volume"] = attachment.StorageVolumeName + res["name"] = attachment.Name + result = append(result, res) + } + return d.Set("storage", result) +} diff --git a/builtin/providers/opc/resource_instance_test.go b/builtin/providers/opc/resource_instance_test.go index 00bb66e7e..36b214db4 100644 --- a/builtin/providers/opc/resource_instance_test.go +++ b/builtin/providers/opc/resource_instance_test.go @@ -61,7 +61,7 @@ func TestAccOPCInstance_sharedNetworking(t *testing.T) { resource.TestCheckResourceAttr(resName, "reverse_dns", "true"), resource.TestCheckResourceAttr(resName, "state", "running"), resource.TestCheckResourceAttr(resName, "tags.#", "2"), - resource.TestCheckResourceAttrSet(resName, "vcable_id"), + resource.TestCheckResourceAttrSet(resName, "vcable"), resource.TestCheckResourceAttr(resName, "virtio", "false"), // Check Data Source to validate networking attributes @@ -117,6 +117,26 @@ func TestAccOPCInstance_ipNetwork(t *testing.T) { }) } +func TestAccOPCInstance_storage(t *testing.T) { + resName := "opc_compute_instance.test" + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccOPCCheckInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccInstanceStorage(rInt), + Check: resource.ComposeTestCheckFunc( + testAccOPCCheckInstanceExists, + resource.TestCheckResourceAttr(resName, "storage.#", "2"), + ), + }, + }, + }) +} + func testAccOPCCheckInstanceExists(s *terraform.State) error { client := testAccProvider.Meta().(*compute.Client).Instances() @@ -227,3 +247,31 @@ data "opc_compute_network_interface" "test" { } `, rInt, rInt, rInt) } + +func testAccInstanceStorage(rInt int) string { + return fmt.Sprintf(` +resource "opc_compute_storage_volume" "foo" { + name = "acc-test-instance-%d" + size = 1 +} + +resource "opc_compute_storage_volume" "bar" { + name = "acc-test-instance-2-%d" + size = 1 +} + +resource "opc_compute_instance" "test" { + name = "acc-test-instance-%d" + label = "TestAccOPCInstance_basic" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + storage { + volume = "${opc_compute_storage_volume.foo.name}" + index = 1 + } + storage { + volume = "${opc_compute_storage_volume.bar.name}" + index = 2 + } +}`, rInt, rInt, rInt) +} diff --git a/builtin/providers/opc/resource_storage_volume.go b/builtin/providers/opc/resource_storage_volume.go index 4f7e90260..9d5a2b13d 100644 --- a/builtin/providers/opc/resource_storage_volume.go +++ b/builtin/providers/opc/resource_storage_volume.go @@ -51,7 +51,7 @@ func resourceOPCStorageVolume() *schema.Resource { Schema: map[string]*schema.Schema{ "image_list": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, }, diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go 
b/vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go index b697b36e3..7368e9ba2 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/instances.go @@ -365,6 +365,7 @@ func (c *InstancesClient) GetInstance(input *GetInstanceInput) (*InstanceInfo, e responseBody.SSHKeys = sshKeyNames responseBody.Networking = c.unqualifyNetworking(responseBody.Networking) + responseBody.Storage = c.unqualifyStorage(responseBody.Storage) return &responseBody, nil } @@ -538,3 +539,15 @@ func (c *InstancesClient) unqualifyNat(nat []string) []string { } return unQualifiedNats } + +func (c *InstancesClient) unqualifyStorage(attachments []StorageAttachment) []StorageAttachment { + unqAttachments := []StorageAttachment{} + for _, v := range attachments { + if v.StorageVolumeName != "" { + v.StorageVolumeName = c.getUnqualifiedName(v.StorageVolumeName) + } + unqAttachments = append(unqAttachments, v) + } + + return unqAttachments +} diff --git a/vendor/vendor.json b/vendor/vendor.json index ed725ac15..63c77ed33 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1970,10 +1970,10 @@ "revision": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" }, { - "checksumSHA1": "Sqz9+8frdOIkyK/v4IjjInZAp4Y=", + "checksumSHA1": "QKusHEboSl00AnORqkjv0gZEhqw=", "path": "github.com/hashicorp/go-oracle-terraform/compute", - "revision": "98fdaf3c4bde245e21947487ba722c3d0abaccb2", - "revisionTime": "2017-03-29T21:19:34Z" + "revision": "15f277fb824b7af18c6bef8d30d84174154f989b", + "revisionTime": "2017-04-05T20:02:51Z" }, { "checksumSHA1": "DzK7lYwHt5Isq5Zf73cnQqBO2LI=", diff --git a/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown b/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown index 56b6240c4..1198cf7ee 100644 --- a/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown @@ -37,6 +37,10 @@ resource "opc_compute_instance" "test_instance" { vnic = "testing-vnic-name" shared_network = false } + storage { + volume = "${opc_compute_storage_volume.foo.name}" + index = 1 + } } ``` @@ -60,6 +64,8 @@ The following arguments are supported: * `networking_info` - (Optional) Information pertaining to an individual network interface to be created and attached to the instance. See [Networking Info](#networking-info) below for more information. +* `storage` - (Optional) Information pertaining to an individual storage attachment to be created during instance creation. Please see [Storage Attachments](#storage-attachments) below for more information. + * `reverse_dns` - (Optional) If set to `true` (default), then reverse DNS records are created. If set to `false`, no reverse DNS records are created. * `ssh_keys` - (Optional) A list of the names of the SSH Keys that can be used to log into the instance. @@ -127,6 +133,22 @@ The following attributes are supported: * `vnic` - (Optional, IP Network Only) The name of the vNIC created for the IP Network. * `vnic_sets` - (Optional, IP Network Only) The array of vNIC Sets the interface was added to. +## Storage Attachments + +Each Storage Attachment config manages a single storage attachment that is created _during instance creation_. +This means that any storage attachments created during instance creation cannot be detached from the instance. 
+Use the `resource_storage_attachment` resource to manage storage attachments for instances if you wish to detach the
+storage volumes at a later date.
+
+The following attributes are supported:
+
+* `index` - (Required) The index number of the volume attachment. `1` is the boot volume for the instance. Values `1-10` are allowed.
+* `volume` - (Required) The name of the storage volume to attach to the instance.
+
+In addition to the above attributes, the following attributes are exported for a storage volume attachment:
+
+* `name` - Name of the storage volume attachment.
+
 ## Attributes Reference
 
 In addition to the attributes listed above, the following attributes are exported:
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown
index 00f0befd9..2f95c3523 100644
--- a/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown
+++ b/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown
@@ -3,12 +3,12 @@ layout: "opc"
 page_title: "Oracle: opc_compute_ip_address_reservation"
 sidebar_current: "docs-opc-resource-ip-address-reservation"
 description: |-
-  Creates and manages an IP address reservation in an OPC identity domain.
+  Creates and manages an IP address reservation in an OPC identity domain for an IP Network.
 ---
 
 # opc\_compute\_ip\_address\_reservation
 
-The ``opc_compute_ip_address_reservation`` resource creates and manages an IP address reservation in an OPC identity domain.
+The ``opc_compute_ip_address_reservation`` resource creates and manages an IP address reservation in an OPC identity domain, for an IP Network.
 
 ## Example Usage
 
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown
index a148875ff..0890dbfdb 100644
--- a/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown
+++ b/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown
@@ -3,13 +3,13 @@ layout: "opc"
 page_title: "Oracle: opc_compute_ip_association"
 sidebar_current: "docs-opc-resource-ip-association"
 description: |-
-  Creates and manages an IP association in an OPC identity domain.
+  Creates and manages an IP association in an OPC identity domain for the Shared Network.
 ---
 
 # opc\_compute\_ip\_association
 
 The ``opc_compute_ip_association`` resource creates and manages an association between an IP address and an instance in
-an OPC identity domain.
+an OPC identity domain, for the Shared Network.
 
 ## Example Usage
 
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown
index f937e03f9..1201fe9cc 100644
--- a/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown
+++ b/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown
@@ -3,12 +3,12 @@ layout: "opc"
 page_title: "Oracle: opc_compute_ip_reservation"
 sidebar_current: "docs-opc-resource-ip-reservation"
 description: |-
-  Creates and manages an IP reservation in an OPC identity domain.
+  Creates and manages an IP reservation in an OPC identity domain for the Shared Network.
 ---
 
 # opc\_compute\_ip\_reservation
 
-The ``opc_compute_ip_reservation`` resource creates and manages an IP reservation in an OPC identity domain.
+The ``opc_compute_ip_reservation`` resource creates and manages an IP reservation in an OPC identity domain for the Shared Network. ## Example Usage diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown index 103f48eea..7fa308ff5 100644 --- a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown @@ -53,7 +53,7 @@ The following arguments are supported: * `tags` - (Optional) Comma-separated strings that tag the storage volume. `bootable` supports the following: -* `image_list` - (Optional) Defines an image list. +* `image_list` - (Required) Defines an image list. * `image_list_entry` - (Optional) Defines an image list entry. ## Attributes Reference From 871d33d90d9c0c7189e4ff4efc0bd264b7e7428b Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Wed, 5 Apr 2017 18:14:11 -0400 Subject: [PATCH 025/342] Fix instance attributes --- builtin/providers/opc/resource_instance.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/builtin/providers/opc/resource_instance.go b/builtin/providers/opc/resource_instance.go index 2744edd5a..8157835ad 100644 --- a/builtin/providers/opc/resource_instance.go +++ b/builtin/providers/opc/resource_instance.go @@ -51,12 +51,10 @@ func resourceInstance() *schema.Resource { // Optional Attributes // ///////////////////////// "instance_attributes": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - return true - }, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.ValidateJsonString, }, "boot_order": { @@ -367,7 +365,12 @@ func resourceInstanceCreate(d *schema.ResourceData, meta interface{}) error { } // Get optional instance attributes - if attributes, err := getInstanceAttributes(d); err != nil && attributes != nil { + attributes, attrErr := getInstanceAttributes(d) + if attrErr != nil { + return attrErr + } + + if attributes != nil { input.Attributes = attributes } From e569fd3f6c66c29969b4b649fd99895a20a41c81 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Thu, 6 Apr 2017 02:07:38 -0400 Subject: [PATCH 026/342] provider/opc: Add Storage Volume Snapshots Adds `storage_volume_snapshot` resource, and allows for creating a storage_volume from a storage volume snapshot. Also adds documentation for additions, and tests. 
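A minimal configuration exercising both additions (snapshotting an existing volume, then cloning a new volume from the snapshot) might look like the sketch below; all resource names and sizes are illustrative:

```
resource "opc_compute_storage_volume" "source" {
  name = "example-volume"
  size = 5
}

resource "opc_compute_storage_volume_snapshot" "example" {
  name   = "example-snapshot"
  volume = "${opc_compute_storage_volume.source.name}"
}

# Clone a new volume of the same size from the snapshot's generated id
resource "opc_compute_storage_volume" "clone" {
  name        = "example-clone"
  size        = 5
  snapshot_id = "${opc_compute_storage_volume_snapshot.example.snapshot_id}"
}
```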
``` TF_ACC=1 go test ./builtin/providers/opc -v -run=TestAccOPCStorageVolume -timeout 120m === RUN TestAccOPCStorageVolumeSnapshot_importBasic --- PASS: TestAccOPCStorageVolumeSnapshot_importBasic (24.72s) === RUN TestAccOPCStorageVolumeSnapshot_basic --- PASS: TestAccOPCStorageVolumeSnapshot_basic (24.01s) === RUN TestAccOPCStorageVolume_Basic --- PASS: TestAccOPCStorageVolume_Basic (12.99s) === RUN TestAccOPCStorageVolume_Complete --- PASS: TestAccOPCStorageVolume_Complete (13.91s) === RUN TestAccOPCStorageVolume_MaxSize --- PASS: TestAccOPCStorageVolume_MaxSize (14.47s) === RUN TestAccOPCStorageVolume_Update --- PASS: TestAccOPCStorageVolume_Update (22.36s) === RUN TestAccOPCStorageVolume_FromSnapshot --- PASS: TestAccOPCStorageVolume_FromSnapshot (32.28s) ``` --- .../import_storage_volume_snapshot_test.go | 29 ++ builtin/providers/opc/provider.go | 43 +-- .../providers/opc/resource_storage_volume.go | 47 +++- .../opc/resource_storage_volume_snapshot.go | 227 ++++++++++++++++ .../resource_storage_volume_snapshot_test.go | 88 ++++++ .../opc/resource_storage_volume_test.go | 49 ++++ .../compute/ip_address_associations.go | 152 +++++++++++ .../compute/storage_volume_snapshots.go | 250 ++++++++++++++++++ .../compute/storage_volumes.go | 1 + vendor/vendor.json | 6 +- .../opc_compute_storage_volume.html.markdown | 3 + ...pute_storage_volume_snapshot.html.markdown | 58 ++++ 12 files changed, 923 insertions(+), 30 deletions(-) create mode 100644 builtin/providers/opc/import_storage_volume_snapshot_test.go create mode 100644 builtin/providers/opc/resource_storage_volume_snapshot.go create mode 100644 builtin/providers/opc/resource_storage_volume_snapshot_test.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go create mode 100644 website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown diff --git a/builtin/providers/opc/import_storage_volume_snapshot_test.go b/builtin/providers/opc/import_storage_volume_snapshot_test.go new file mode 100644 index 000000000..68654c13b --- /dev/null +++ b/builtin/providers/opc/import_storage_volume_snapshot_test.go @@ -0,0 +1,29 @@ +package opc + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCStorageVolumeSnapshot_importBasic(t *testing.T) { + resourceName := "opc_compute_storage_volume_snapshot.test" + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(resourceName, testAccCheckStorageVolumeSnapshotDestroyed), + Steps: []resource.TestStep{ + { + Config: testAccStorageVolumeSnapshot_basic(rInt), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/provider.go b/builtin/providers/opc/provider.go index e1ac86dee..579d5ac7b 100644 --- a/builtin/providers/opc/provider.go +++ b/builtin/providers/opc/provider.go @@ -50,27 +50,28 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "opc_compute_ip_network": resourceOPCIPNetwork(), - "opc_compute_acl": resourceOPCACL(), - "opc_compute_image_list": resourceOPCImageList(), - "opc_compute_image_list_entry": resourceOPCImageListEntry(), - "opc_compute_instance": resourceInstance(), - 
"opc_compute_ip_address_reservation": resourceOPCIPAddressReservation(), - "opc_compute_ip_association": resourceOPCIPAssociation(), - "opc_compute_ip_network_exchange": resourceOPCIPNetworkExchange(), - "opc_compute_ip_reservation": resourceOPCIPReservation(), - "opc_compute_route": resourceOPCRoute(), - "opc_compute_security_application": resourceOPCSecurityApplication(), - "opc_compute_security_association": resourceOPCSecurityAssociation(), - "opc_compute_security_ip_list": resourceOPCSecurityIPList(), - "opc_compute_security_list": resourceOPCSecurityList(), - "opc_compute_security_rule": resourceOPCSecurityRule(), - "opc_compute_sec_rule": resourceOPCSecRule(), - "opc_compute_ssh_key": resourceOPCSSHKey(), - "opc_compute_storage_volume": resourceOPCStorageVolume(), - "opc_compute_vnic_set": resourceOPCVNICSet(), - "opc_compute_security_protocol": resourceOPCSecurityProtocol(), - "opc_compute_ip_address_prefix_set": resourceOPCIPAddressPrefixSet(), + "opc_compute_ip_network": resourceOPCIPNetwork(), + "opc_compute_acl": resourceOPCACL(), + "opc_compute_image_list": resourceOPCImageList(), + "opc_compute_image_list_entry": resourceOPCImageListEntry(), + "opc_compute_instance": resourceInstance(), + "opc_compute_ip_address_reservation": resourceOPCIPAddressReservation(), + "opc_compute_ip_association": resourceOPCIPAssociation(), + "opc_compute_ip_network_exchange": resourceOPCIPNetworkExchange(), + "opc_compute_ip_reservation": resourceOPCIPReservation(), + "opc_compute_route": resourceOPCRoute(), + "opc_compute_security_application": resourceOPCSecurityApplication(), + "opc_compute_security_association": resourceOPCSecurityAssociation(), + "opc_compute_security_ip_list": resourceOPCSecurityIPList(), + "opc_compute_security_list": resourceOPCSecurityList(), + "opc_compute_security_rule": resourceOPCSecurityRule(), + "opc_compute_sec_rule": resourceOPCSecRule(), + "opc_compute_ssh_key": resourceOPCSSHKey(), + "opc_compute_storage_volume": resourceOPCStorageVolume(), + "opc_compute_storage_volume_snapshot": resourceOPCStorageVolumeSnapshot(), + "opc_compute_vnic_set": resourceOPCVNICSet(), + "opc_compute_security_protocol": resourceOPCSecurityProtocol(), + "opc_compute_ip_address_prefix_set": resourceOPCIPAddressPrefixSet(), }, ConfigureFunc: providerConfigure, diff --git a/builtin/providers/opc/resource_storage_volume.go b/builtin/providers/opc/resource_storage_volume.go index 9d5a2b13d..28f3e7e05 100644 --- a/builtin/providers/opc/resource_storage_volume.go +++ b/builtin/providers/opc/resource_storage_volume.go @@ -42,6 +42,26 @@ func resourceOPCStorageVolume() *schema.Resource { }, true), }, + "snapshot": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "snapshot_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "bootable": { Type: schema.TypeList, Optional: true, @@ -128,7 +148,19 @@ func resourceOPCStorageVolumeCreate(d *schema.ResourceData, meta interface{}) er Tags: getStringList(d, "tags"), } - expandOPCStorageVolumeOptionalFields(d, input) + expandOPCStorageVolumeOptionalFields(d, &input) + + if v, ok := d.GetOk("snapshot"); ok { + input.Snapshot = v.(string) + } + + if v, ok := d.GetOk("snapshot_account"); ok { + input.SnapshotAccount = v.(string) + } + + if v, ok := d.GetOk("snapshot_id"); ok { + input.SnapshotID = v.(string) + } info, err := client.CreateStorageVolume(&input) if err != nil { @@ 
-188,6 +220,9 @@ func resourceOPCStorageVolumeRead(d *schema.ResourceData, meta interface{}) erro d.Set("name", result.Name) d.Set("description", result.Description) d.Set("storage", result.Properties[0]) + d.Set("snapshot", result.Snapshot) + d.Set("snapshot_id", result.SnapshotID) + d.Set("snapshot_account", result.SnapshotAccount) size, err := strconv.Atoi(result.Size) if err != nil { return err @@ -220,11 +255,11 @@ func resourceOPCStorageVolumeDelete(d *schema.ResourceData, meta interface{}) er return nil } -func expandOPCStorageVolumeOptionalFields(d *schema.ResourceData, input compute.CreateStorageVolumeInput) { - value, exists := d.GetOk("bootable") - input.Bootable = exists - if exists { - configs := value.([]interface{}) +func expandOPCStorageVolumeOptionalFields(d *schema.ResourceData, input *compute.CreateStorageVolumeInput) { + bootValue, bootExists := d.GetOk("bootable") + input.Bootable = bootExists + if bootExists { + configs := bootValue.([]interface{}) config := configs[0].(map[string]interface{}) input.ImageList = config["image_list"].(string) diff --git a/builtin/providers/opc/resource_storage_volume_snapshot.go b/builtin/providers/opc/resource_storage_volume_snapshot.go new file mode 100644 index 000000000..629dc2f3c --- /dev/null +++ b/builtin/providers/opc/resource_storage_volume_snapshot.go @@ -0,0 +1,227 @@ +package opc + +import ( + "fmt" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceOPCStorageVolumeSnapshot() *schema.Resource { + return &schema.Resource{ + Create: resourceOPCStorageVolumeSnapshotCreate, + Read: resourceOPCStorageVolumeSnapshotRead, + Delete: resourceOPCStorageVolumeSnapshotDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + // Required Attributes + "volume": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + // Optional Attributes + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + // Optional, but also computed if unspecified + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "parent_volume_bootable": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "false", + }, + + "collocated": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "tags": tagsForceNewSchema(), + + // Computed Attributes + "account": { + Type: schema.TypeString, + Computed: true, + }, + + "machine_image_name": { + Type: schema.TypeString, + Computed: true, + }, + + "size": { + Type: schema.TypeString, + Computed: true, + }, + + "property": { + Type: schema.TypeString, + Computed: true, + }, + + "platform": { + Type: schema.TypeString, + Computed: true, + }, + + "snapshot_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + + "snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + + "start_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + + "status_detail": { + Type: schema.TypeString, + Computed: true, + }, + + "status_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + + "uri": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceOPCStorageVolumeSnapshotCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).StorageVolumeSnapshots() + + // Get required attribute + 
input := &compute.CreateStorageVolumeSnapshotInput{ + Volume: d.Get("volume").(string), + } + + if v, ok := d.GetOk("description"); ok { + input.Description = v.(string) + } + + if v, ok := d.GetOk("name"); ok { + input.Name = v.(string) + } + + if v, ok := d.GetOk("parent_volume_bootable"); ok { + input.ParentVolumeBootable = v.(string) + } + + collocated := d.Get("collocated").(bool) + if collocated { + input.Property = compute.SnapshotPropertyCollocated + } + + tags := getStringList(d, "tags") + if len(tags) > 0 { + input.Tags = tags + } + + info, err := client.CreateStorageVolumeSnapshot(input) + if err != nil { + return fmt.Errorf("Error creating snapshot '%s': %v", input.Name, err) + } + + d.SetId(info.Name) + return resourceOPCStorageVolumeSnapshotRead(d, meta) +} + +func resourceOPCStorageVolumeSnapshotRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).StorageVolumeSnapshots() + + name := d.Id() + input := &compute.GetStorageVolumeSnapshotInput{ + Name: name, + } + + result, err := client.GetStorageVolumeSnapshot(input) + if err != nil { + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading storage volume snapshot '%s': %v", name, err) + } + + if result == nil { + d.SetId("") + return nil + } + + d.Set("volume", result.Volume) + d.Set("description", result.Description) + d.Set("name", result.Name) + d.Set("parent_volume_bootable", result.ParentVolumeBootable) + d.Set("property", result.Property) + d.Set("platform", result.Platform) + d.Set("account", result.Account) + d.Set("machine_image_name", result.MachineImageName) + d.Set("size", result.Size) + d.Set("snapshot_timestamp", result.SnapshotTimestamp) + d.Set("snapshot_id", result.SnapshotID) + d.Set("start_timestamp", result.StartTimestamp) + d.Set("status", result.Status) + d.Set("status_detail", result.StatusDetail) + d.Set("status_timestamp", result.StatusTimestamp) + d.Set("uri", result.URI) + + if result.Property != compute.SnapshotPropertyCollocated { + d.Set("collocated", false) + } else { + d.Set("collocated", true) + } + + if err := setStringList(d, "tags", result.Tags); err != nil { + return err + } + + return nil +} + +func resourceOPCStorageVolumeSnapshotDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).StorageVolumeSnapshots() + + name := d.Id() + + input := &compute.DeleteStorageVolumeSnapshotInput{ + Name: name, + } + + if err := client.DeleteStorageVolumeSnapshot(input); err != nil { + return fmt.Errorf("Error deleting storage volume snapshot '%s': %v", name, err) + } + + return nil +} diff --git a/builtin/providers/opc/resource_storage_volume_snapshot_test.go b/builtin/providers/opc/resource_storage_volume_snapshot_test.go new file mode 100644 index 000000000..9d0c40798 --- /dev/null +++ b/builtin/providers/opc/resource_storage_volume_snapshot_test.go @@ -0,0 +1,88 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCStorageVolumeSnapshot_basic(t *testing.T) { + snapshotName := "opc_compute_storage_volume_snapshot.test" + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(snapshotName, testAccCheckStorageVolumeSnapshotDestroyed), + Steps: []resource.TestStep{ + { + Config: 
testAccStorageVolumeSnapshot_basic(rInt), + Check: resource.ComposeTestCheckFunc(opcResourceCheck(snapshotName, testAccCheckStorageVolumeSnapshotExists), + resource.TestCheckResourceAttr(snapshotName, "name", fmt.Sprintf("test-acc-stor-vol-%d", rInt)), + resource.TestCheckResourceAttr(snapshotName, "parent_volume_bootable", "false"), + resource.TestCheckResourceAttr(snapshotName, "collocated", "true"), + resource.TestCheckResourceAttr(snapshotName, "size", "5"), + ), + }, + }, + }) +} + +func testAccCheckStorageVolumeSnapshotExists(state *OPCResourceState) error { + client := state.Client.StorageVolumeSnapshots() + snapshotName := state.Attributes["name"] + + input := &compute.GetStorageVolumeSnapshotInput{ + Name: snapshotName, + } + + info, err := client.GetStorageVolumeSnapshot(input) + if err != nil { + return fmt.Errorf("Error retrieving state of snapshot '%s': %v", snapshotName, err) + } + + if info == nil { + return fmt.Errorf("No info found for snapshot '%s'", snapshotName) + } + + return nil +} + +func testAccCheckStorageVolumeSnapshotDestroyed(state *OPCResourceState) error { + client := state.Client.StorageVolumeSnapshots() + snapshotName := state.Attributes["name"] + + input := &compute.GetStorageVolumeSnapshotInput{ + Name: snapshotName, + } + info, err := client.GetStorageVolumeSnapshot(input) + if err != nil { + return fmt.Errorf("Error retrieving state of snapshot '%s': %v", snapshotName, err) + } + + if info != nil { + return fmt.Errorf("Snapshot '%s' still exists", snapshotName) + } + + return nil +} + +func testAccStorageVolumeSnapshot_basic(rInt int) string { + return fmt.Sprintf(` +resource "opc_compute_storage_volume" "foo" { + name = "test-acc-stor-vol-%d" + description = "testAccStorageVolumeSnapshot_basic" + size = 5 +} + +resource "opc_compute_storage_volume_snapshot" "test" { + name = "test-acc-stor-vol-%d" + description = "storage volume snapshot" + collocated = true + volume = "${opc_compute_storage_volume.foo.name}" +} +`, rInt, rInt) +} diff --git a/builtin/providers/opc/resource_storage_volume_test.go b/builtin/providers/opc/resource_storage_volume_test.go index 187e3c772..0c6ec0d90 100644 --- a/builtin/providers/opc/resource_storage_volume_test.go +++ b/builtin/providers/opc/resource_storage_volume_test.go @@ -116,6 +116,29 @@ func TestAccOPCStorageVolume_Bootable(t *testing.T) { }) } +func TestAccOPCStorageVolume_FromSnapshot(t *testing.T) { + volumeResourceName := "opc_compute_storage_volume.test" + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(volumeResourceName, testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + { + Config: testAccStorageVolumeFromSnapshot(rInt), + Check: resource.ComposeTestCheckFunc( + opcResourceCheck(volumeResourceName, testAccCheckStorageVolumeExists), + resource.TestCheckResourceAttr(volumeResourceName, "name", fmt.Sprintf("test-acc-stor-vol-final-%d", rInt)), + resource.TestCheckResourceAttrSet(volumeResourceName, "snapshot"), + resource.TestCheckResourceAttrSet(volumeResourceName, "snapshot_id"), + resource.TestCheckResourceAttr(volumeResourceName, "size", "5"), + ), + }, + }, + }) +} + func testAccCheckStorageVolumeExists(state *OPCResourceState) error { sv := state.Client.StorageVolumes() volumeName := state.Attributes["name"] @@ -204,3 +227,29 @@ resource "opc_compute_storage_volume" "test" { size = 2048 } ` + +func testAccStorageVolumeFromSnapshot(rInt int) string { + return 
fmt.Sprintf(` +// Initial Storage Volume to create snapshot with +resource "opc_compute_storage_volume" "foo" { + name = "test-acc-stor-vol-%d" + description = "Acc Test intermediary storage volume for snapshot" + size = 5 +} + +resource "opc_compute_storage_volume_snapshot" "foo" { + description = "testing-acc" + name = "test-acc-stor-snapshot-%d" + collocated = true + volume = "${opc_compute_storage_volume.foo.name}" +} + +// Create storage volume from snapshot +resource "opc_compute_storage_volume" "test" { + name = "test-acc-stor-vol-final-%d" + description = "storage volume from snapshot" + size = 5 + snapshot_id = "${opc_compute_storage_volume_snapshot.foo.snapshot_id}" +} +`, rInt, rInt, rInt) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go new file mode 100644 index 000000000..335ada558 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go @@ -0,0 +1,152 @@ +package compute + +const ( + IPAddressAssociationDescription = "ip address association" + IPAddressAssociationContainerPath = "/network/v1/ipassociation/" + IPAddressAssociationResourcePath = "/network/v1/ipassociation" +) + +type IPAddressAssociationsClient struct { + ResourceClient +} + +// IPAddressAssociations() returns an IPAddressAssociationsClient that can be used to access the +// necessary CRUD functions for IP Address Associations. +func (c *Client) IPAddressAssociations() *IPAddressAssociationsClient { + return &IPAddressAssociationsClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: IPAddressAssociationDescription, + ContainerPath: IPAddressAssociationContainerPath, + ResourceRootPath: IPAddressAssociationResourcePath, + }, + } +} + +// IPAddressAssociationInfo contains the exported fields necessary to hold all the information about an +// IP Address Association +type IPAddressAssociationInfo struct { + // The name of the NAT IP address reservation. + IPAddressReservation string `json:"ipAddressReservation"` + // Name of the virtual NIC associated with this NAT IP reservation. + Vnic string `json:"vnic"` + // The name of the IP Address Association + Name string `json:"name"` + // Description of the IP Address Association + Description string `json:"description"` + // Slice of tags associated with the IP Address Association + Tags []string `json:"tags"` + // Uniform Resource Identifier for the IP Address Association + Uri string `json:"uri"` +} + +type CreateIPAddressAssociationInput struct { + // The name of the IP Address Association to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // The name of the NAT IP address reservation. + // Optional + IPAddressReservation string `json:"ipAddressReservation,omitempty"` + + // Name of the virtual NIC associated with this NAT IP reservation. + // Optional + Vnic string `json:"vnic,omitempty"` + + // Description of the IPAddressAssociation + // Optional + Description string `json:"description"` + + // String slice of tags to apply to the IP Address Association object + // Optional + Tags []string `json:"tags"` +} + +// Create a new IP Address Association from an IPAddressAssociationsClient and an input struct. 
+// Returns a populated Info struct for the IP Address Association, and any errors +func (c *IPAddressAssociationsClient) CreateIPAddressAssociation(input *CreateIPAddressAssociationInput) (*IPAddressAssociationInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.IPAddressReservation = c.getQualifiedName(input.IPAddressReservation) + input.Vnic = c.getQualifiedName(input.Vnic) + + var ipInfo IPAddressAssociationInfo + if err := c.createResource(&input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type GetIPAddressAssociationInput struct { + // The name of the IP Address Association to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +// Returns a populated IPAddressAssociationInfo struct from an input struct +func (c *IPAddressAssociationsClient) GetIPAddressAssociation(input *GetIPAddressAssociationInput) (*IPAddressAssociationInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo IPAddressAssociationInfo + if err := c.getResource(input.Name, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +// UpdateIPAddressAssociationInput defines what to update in a ip address association +type UpdateIPAddressAssociationInput struct { + // The name of the IP Address Association to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // The name of the NAT IP address reservation. + // Optional + IPAddressReservation string `json:"ipAddressReservation,omitempty"` + + // Name of the virtual NIC associated with this NAT IP reservation. + // Optional + Vnic string `json:"vnic,omitempty"` + + // Description of the IPAddressAssociation + // Optional + Description string `json:"description"` + + // String slice of tags to apply to the IP Address Association object + // Optional + Tags []string `json:"tags"` +} + +// UpdateIPAddressAssociation update the ip address association +func (c *IPAddressAssociationsClient) UpdateIPAddressAssociation(updateInput *UpdateIPAddressAssociationInput) (*IPAddressAssociationInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + updateInput.IPAddressReservation = c.getQualifiedName(updateInput.IPAddressReservation) + updateInput.Vnic = c.getQualifiedName(updateInput.Vnic) + var ipInfo IPAddressAssociationInfo + if err := c.updateResource(updateInput.Name, updateInput, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type DeleteIPAddressAssociationInput struct { + // The name of the IP Address Association to query for. 
Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *IPAddressAssociationsClient) DeleteIPAddressAssociation(input *DeleteIPAddressAssociationInput) error { + return c.deleteResource(input.Name) +} + +// Unqualifies any qualified fields in the IPAddressAssociationInfo struct +func (c *IPAddressAssociationsClient) success(info *IPAddressAssociationInfo) (*IPAddressAssociationInfo, error) { + c.unqualify(&info.Name) + c.unqualify(&info.Vnic) + c.unqualify(&info.IPAddressReservation) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go new file mode 100644 index 000000000..f958d2107 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go @@ -0,0 +1,250 @@ +package compute + +import ( + "fmt" + "strings" +) + +const ( + StorageVolumeSnapshotDescription = "storage volume snapshot" + StorageVolumeSnapshotContainerPath = "/storage/snapshot/" + StorageVolumeSnapshotResourcePath = "/storage/snapshot" + + WaitForSnapshotCreateTimeout = 1200 + WaitForSnapshotDeleteTimeout = 1500 + + // Collocated Snapshot Property + SnapshotPropertyCollocated = "/oracle/private/storage/snapshot/collocated" +) + +// StorageVolumeSnapshotClient is a client for the Storage Volume Snapshot functions of the Compute API. +type StorageVolumeSnapshotClient struct { + ResourceClient +} + +func (c *Client) StorageVolumeSnapshots() *StorageVolumeSnapshotClient { + return &StorageVolumeSnapshotClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: StorageVolumeSnapshotDescription, + ContainerPath: StorageVolumeSnapshotContainerPath, + ResourceRootPath: StorageVolumeSnapshotResourcePath, + }, + } +} + +// StorageVolumeSnapshotInfo represents the information retrieved from the service about a storage volume snapshot +type StorageVolumeSnapshotInfo struct { + // Account to use for snapshots + Account string `json:"account"` + + // Description of the snapshot + Description string `json:"description"` + + // The name of the machine image that's used in the boot volume from which this snapshot is taken + MachineImageName string `json:"machineimage_name"` + + // Name of the snapshot + Name string `json:"name"` + + // String indicating whether the parent volume is bootable or not + ParentVolumeBootable string `json:"parent_volume_bootable"` + + // Platform the snapshot is compatible with + Platform string `json:"platform"` + + // String determining whether the snapshot is remote or collocated + Property string `json:"property"` + + // The size of the snapshot in GB + Size string `json:"size"` + + // The ID of the snapshot. Generated by the server + SnapshotID string `json:"snapshot_id"` + + // The timestamp of the storage snapshot + SnapshotTimestamp string `json:"snapshot_timestamp"` + + // Timestamp for when the operation started + StartTimestamp string `json:"start_timestamp"` + + // Status of the snapshot + Status string `json:"status"` + + // Status Detail of the storage snapshot + StatusDetail string `json:"status_detail"` + + // Indicates the time that the current view of the storage volume snapshot was generated. 
+ StatusTimestamp string `json:"status_timestamp"` + + // Array of tags for the snapshot + Tags []string `json:"tags,omitempty"` + + // Uniform Resource Identifier + URI string `json:"uri"` + + // Name of the parent storage volume for the snapshot + Volume string `json:"volume"` +} + +// CreateStorageVolumeSnapshotInput represents the body of an API request to create a new storage volume snapshot +type CreateStorageVolumeSnapshotInput struct { + // Description of the snapshot + // Optional + Description string `json:"description,omitempty"` + + // Name of the snapshot + // Optional, will be generated if not specified + Name string `json:"name,omitempty"` + + // Whether or not the parent volume is bootable + // Optional + ParentVolumeBootable string `json:"parent_volume_bootable,omitempty"` + + // Whether collocated or remote + // Optional, will be remote if unspecified + Property string `json:"property,omitempty"` + + // Array of tags for the snapshot + // Optional + Tags []string `json:"tags,omitempty"` + + // Name of the volume to create the snapshot from + // Required + Volume string `json:"volume"` + + // Timeout (in seconds) to wait for snapshot to be completed. Will use default if unspecified + Timeout int +} + +// CreateStorageVolumeSnapshot creates a snapshot based on the supplied information struct +func (c *StorageVolumeSnapshotClient) CreateStorageVolumeSnapshot(input *CreateStorageVolumeSnapshotInput) (*StorageVolumeSnapshotInfo, error) { + if input.Name != "" { + input.Name = c.getQualifiedName(input.Name) + } + input.Volume = c.getQualifiedName(input.Volume) + + var storageSnapshotInfo StorageVolumeSnapshotInfo + if err := c.createResource(&input, &storageSnapshotInfo); err != nil { + return nil, err + } + + timeout := WaitForSnapshotCreateTimeout + if input.Timeout != 0 { + timeout = input.Timeout + } + + // The name of the snapshot could have been generated. 
Use the response name as input + return c.waitForStorageSnapshotAvailable(storageSnapshotInfo.Name, timeout) +} + +// GetStorageVolumeSnapshotInput represents the body of an API request to get information on a storage volume snapshot +type GetStorageVolumeSnapshotInput struct { + // Name of the snapshot + Name string `json:"name"` +} + +// GetStorageVolumeSnapshot makes an API request to populate information on a storage volume snapshot +func (c *StorageVolumeSnapshotClient) GetStorageVolumeSnapshot(input *GetStorageVolumeSnapshotInput) (*StorageVolumeSnapshotInfo, error) { + var storageSnapshot StorageVolumeSnapshotInfo + input.Name = c.getQualifiedName(input.Name) + if err := c.getResource(input.Name, &storageSnapshot); err != nil { + if WasNotFoundError(err) { + return nil, nil + } + + return nil, err + } + return c.success(&storageSnapshot) +} + +// DeleteStorageVolumeSnapshotInput represents the body of an API request to delete a storage volume snapshot +type DeleteStorageVolumeSnapshotInput struct { + // Name of the snapshot to delete + Name string `json:"name"` + + // Timeout in seconds to wait for deletion, will use default if unspecified + Timeout int +} + +// DeleteStoragevolumeSnapshot makes an API request to delete a storage volume snapshot +func (c *StorageVolumeSnapshotClient) DeleteStorageVolumeSnapshot(input *DeleteStorageVolumeSnapshotInput) error { + input.Name = c.getQualifiedName(input.Name) + + if err := c.deleteResource(input.Name); err != nil { + return err + } + + timeout := WaitForSnapshotDeleteTimeout + if input.Timeout != 0 { + timeout = input.Timeout + } + + return c.waitForStorageSnapshotDeleted(input.Name, timeout) +} + +func (c *StorageVolumeSnapshotClient) success(result *StorageVolumeSnapshotInfo) (*StorageVolumeSnapshotInfo, error) { + c.unqualify(&result.Name) + c.unqualify(&result.Volume) + + sizeInGigaBytes, err := sizeInGigaBytes(result.Size) + if err != nil { + return nil, err + } + result.Size = sizeInGigaBytes + + return result, nil +} + +// Waits for a storage snapshot to become available +func (c *StorageVolumeSnapshotClient) waitForStorageSnapshotAvailable(name string, timeout int) (*StorageVolumeSnapshotInfo, error) { + var result *StorageVolumeSnapshotInfo + + err := c.waitFor( + fmt.Sprintf("storage volume snapshot %s to become available", c.getQualifiedName(name)), + timeout, + func() (bool, error) { + req := &GetStorageVolumeSnapshotInput{ + Name: name, + } + res, err := c.GetStorageVolumeSnapshot(req) + if err != nil { + return false, err + } + + if res != nil { + result = res + if strings.ToLower(result.Status) == "completed" { + return true, nil + } else if strings.ToLower(result.Status) == "error" { + return false, fmt.Errorf("Snapshot '%s' failed to create successfully. 
Status: %s Status Detail: %s", result.Name, result.Status, result.StatusDetail) + } + } + + return false, nil + }) + + return result, err +} + +// Waits for a storage snapshot to be deleted +func (c *StorageVolumeSnapshotClient) waitForStorageSnapshotDeleted(name string, timeout int) error { + return c.waitFor( + fmt.Sprintf("storage volume snapshot %s to be deleted", c.getQualifiedName(name)), + timeout, + func() (bool, error) { + req := &GetStorageVolumeSnapshotInput{ + Name: name, + } + res, err := c.GetStorageVolumeSnapshot(req) + if res == nil { + return true, nil + } + + if err != nil { + return false, err + } + + return res == nil, nil + }) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go index c40c354a9..7193a6c24 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go @@ -188,6 +188,7 @@ type GetStorageVolumeInput struct { func (c *StorageVolumeClient) success(result *StorageVolumeInfo) (*StorageVolumeInfo, error) { c.unqualify(&result.Name) + c.unqualify(&result.Snapshot) sizeInMegaBytes, err := sizeInGigaBytes(result.Size) if err != nil { diff --git a/vendor/vendor.json b/vendor/vendor.json index 63c77ed33..0f8bd50ef 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1970,10 +1970,10 @@ "revision": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" }, { - "checksumSHA1": "QKusHEboSl00AnORqkjv0gZEhqw=", + "checksumSHA1": "mVLpbxsm+8TlXKgkezrh3c5I7+4=", "path": "github.com/hashicorp/go-oracle-terraform/compute", - "revision": "15f277fb824b7af18c6bef8d30d84174154f989b", - "revisionTime": "2017-04-05T20:02:51Z" + "revision": "381402af3554bcca5fa7eeda94d47003e6ba7ee7", + "revisionTime": "2017-04-06T04:33:22Z" }, { "checksumSHA1": "DzK7lYwHt5Isq5Zf73cnQqBO2LI=", diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown index 7fa308ff5..c4a682f41 100644 --- a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown @@ -51,6 +51,9 @@ The following arguments are supported: * `storage_type` - (Optional) - The Type of Storage to provision. Possible values are `/oracle/public/storage/latency` or `/oracle/public/storage/default`. Defaults to `/oracle/public/storage/default`. * `bootable` - (Optional) A `bootable` block as defined below. * `tags` - (Optional) Comma-separated strings that tag the storage volume. +* `snapshot` - (Optional) Name of the storage volume snapshot if this storage volume is a clone. +* `snapshot_account` - (Optional) Account of the parent snapshot from which the storage volume is restored. +* `snapshot_id` - (Optional) Id of the parent snapshot from which the storage volume is restored or cloned. `bootable` supports the following: * `image_list` - (Required) Defines an image list. 
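Of the three new arguments, `snapshot_id` points at the parent snapshot directly, while `snapshot` (optionally qualified by `snapshot_account`) refers to it by name. A sketch of the name-based form, with illustrative names:

```
resource "opc_compute_storage_volume" "restored" {
  name     = "restored-volume"
  size     = 5
  snapshot = "${opc_compute_storage_volume_snapshot.example.name}"
}
```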
diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown
new file mode 100644
index 000000000..f7ba88e06
--- /dev/null
+++ b/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown
@@ -0,0 +1,58 @@
+---
+layout: "opc"
+page_title: "Oracle: opc_compute_storage_volume_snapshot"
+sidebar_current: "docs-opc-resource-storage-volume-snapshot"
+description: |-
+  Creates and manages a storage volume snapshot in an OPC identity domain.
+---
+
+# opc\_compute\_storage\_volume_snapshot
+
+The ``opc_compute_storage_volume_snapshot`` resource creates and manages a storage volume snapshot in an OPC identity domain.
+
+## Example Usage
+
+```
+resource "opc_compute_storage_volume_snapshot" "test" {
+  name = "storageVolume1"
+  description = "Description for the Storage Volume"
+  tags = ["bar", "foo"]
+  collocated = true
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `volume` (Required) The name of the storage volume to create the snapshot from.
+* `description` (Optional) The description of the storage volume snapshot.
+* `name` (Optional) The name of the storage volume snapshot. Will be generated if unspecified.
+* `parent_volume_bootable` (Optional) A string value of whether or not the parent volume is 'bootable' or not. Defaults to `"false"`.
+* `collocated` (Optional) Boolean specifying whether the snapshot is collocated or remote. Defaults to `false`.
+* `tags` - (Optional) Comma-separated strings that tag the storage volume.
+
+## Attributes Reference
+
+In addition to the attributes above, the following attributes are exported:
+
+* `account` - Account to use for snapshots.
+* `machine_image_name` - The name of the machine image that's used in the boot volume from which this snapshot is taken.
+* `size` - The size of the snapshot in GB.
+* `property` - Where the snapshot is stored, whether collocated, or in the Oracle Storage Cloud Service instance.
+* `platform` - The OS platform this snapshot is compatible with.
+* `snapshot_timestamp` - Timestamp of the storage snapshot, generated by the storage server. The snapshot will contain data written to the original volume before this time.
+* `snapshot_id` - The Oracle ID of the snapshot.
+* `start_timestamp` - Timestamp when the snapshot was started.
+* `status` - Status of the snapshot.
+* `status_detail` - Details about the latest state of the storage volume snapshot.
+* `status_timestamp` - Indicates the time that the current view of the storage volume snapshot was generated.
+* `uri` - Uniform Resource Identifier.
+
+## Import
+
+Storage Volume Snapshots can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_storage_volume_snapshot.volume1 example
+```

From 3bd582b3d5798d5441043a729d634f4bd80ae8ed Mon Sep 17 00:00:00 2001
From: Jake Champlin
Date: Thu, 6 Apr 2017 12:29:13 -0400
Subject: [PATCH 027/342] Add fixes from comments

- Adds docs to sidebar
- Changes `volume` to `volume_name` in volume snapshot
- Fixes tests
- Changes `parent_volume_bootable` to boolean, and converts to string for API

```
$ make testacc TEST=./builtin/providers/opc TESTARGS="-run=TestAccOPCStorageVolumeSnapshot_basic"
==> Checking that code complies with gofmt requirements...
go generate $(go list ./... 
| grep -v /terraform/vendor/) 2017/04/06 12:26:59 Generated command/internal_plugin_list.go TF_ACC=1 go test ./builtin/providers/opc -v -run=TestAccOPCStorageVolumeSnapshot_basic -timeout 120m === RUN TestAccOPCStorageVolumeSnapshot_basic --- PASS: TestAccOPCStorageVolumeSnapshot_basic (24.45s) PASS ok github.com/hashicorp/terraform/builtin/providers/opc 24.476s ``` --- .../opc/resource_storage_volume_snapshot.go | 24 ++++++++++++------- .../resource_storage_volume_snapshot_test.go | 2 +- .../opc/resource_storage_volume_test.go | 8 +++---- ...pute_storage_volume_snapshot.html.markdown | 3 ++- website/source/layouts/opc.erb | 3 +++ 5 files changed, 26 insertions(+), 14 deletions(-) diff --git a/builtin/providers/opc/resource_storage_volume_snapshot.go b/builtin/providers/opc/resource_storage_volume_snapshot.go index 629dc2f3c..5fb0d9847 100644 --- a/builtin/providers/opc/resource_storage_volume_snapshot.go +++ b/builtin/providers/opc/resource_storage_volume_snapshot.go @@ -2,6 +2,7 @@ package opc import ( "fmt" + "strconv" "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/terraform/helper/schema" @@ -18,7 +19,7 @@ func resourceOPCStorageVolumeSnapshot() *schema.Resource { Schema: map[string]*schema.Schema{ // Required Attributes - "volume": { + "volume_name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -40,10 +41,10 @@ func resourceOPCStorageVolumeSnapshot() *schema.Resource { }, "parent_volume_bootable": { - Type: schema.TypeString, + Type: schema.TypeBool, Optional: true, ForceNew: true, - Default: "false", + Default: false, }, "collocated": { @@ -124,7 +125,7 @@ func resourceOPCStorageVolumeSnapshotCreate(d *schema.ResourceData, meta interfa // Get required attribute input := &compute.CreateStorageVolumeSnapshotInput{ - Volume: d.Get("volume").(string), + Volume: d.Get("volume_name").(string), } if v, ok := d.GetOk("description"); ok { @@ -135,8 +136,10 @@ func resourceOPCStorageVolumeSnapshotCreate(d *schema.ResourceData, meta interfa input.Name = v.(string) } - if v, ok := d.GetOk("parent_volume_bootable"); ok { - input.ParentVolumeBootable = v.(string) + // Convert parent_volume_bootable to string + bootable := d.Get("parent_volume_bootable").(bool) + if bootable { + input.ParentVolumeBootable = "true" } collocated := d.Get("collocated").(bool) @@ -180,10 +183,9 @@ func resourceOPCStorageVolumeSnapshotRead(d *schema.ResourceData, meta interface return nil } - d.Set("volume", result.Volume) + d.Set("volume_name", result.Volume) d.Set("description", result.Description) d.Set("name", result.Name) - d.Set("parent_volume_bootable", result.ParentVolumeBootable) d.Set("property", result.Property) d.Set("platform", result.Platform) d.Set("account", result.Account) @@ -197,6 +199,12 @@ func resourceOPCStorageVolumeSnapshotRead(d *schema.ResourceData, meta interface d.Set("status_timestamp", result.StatusTimestamp) d.Set("uri", result.URI) + bootable, err := strconv.ParseBool(result.ParentVolumeBootable) + if err != nil { + return fmt.Errorf("Error converting parent volume to boolean: %v", err) + } + d.Set("parent_volume_bootable", bootable) + if result.Property != compute.SnapshotPropertyCollocated { d.Set("collocated", false) } else { diff --git a/builtin/providers/opc/resource_storage_volume_snapshot_test.go b/builtin/providers/opc/resource_storage_volume_snapshot_test.go index 9d0c40798..9d4b08ee0 100644 --- a/builtin/providers/opc/resource_storage_volume_snapshot_test.go +++ b/builtin/providers/opc/resource_storage_volume_snapshot_test.go @@ -82,7 
+82,7 @@ resource "opc_compute_storage_volume_snapshot" "test" { name = "test-acc-stor-vol-%d" description = "storage volume snapshot" collocated = true - volume = "${opc_compute_storage_volume.foo.name}" + volume_name = "${opc_compute_storage_volume.foo.name}" } `, rInt, rInt) } diff --git a/builtin/providers/opc/resource_storage_volume_test.go b/builtin/providers/opc/resource_storage_volume_test.go index 0c6ec0d90..fc157ef94 100644 --- a/builtin/providers/opc/resource_storage_volume_test.go +++ b/builtin/providers/opc/resource_storage_volume_test.go @@ -238,10 +238,10 @@ resource "opc_compute_storage_volume" "foo" { } resource "opc_compute_storage_volume_snapshot" "foo" { - description = "testing-acc" - name = "test-acc-stor-snapshot-%d" - collocated = true - volume = "${opc_compute_storage_volume.foo.name}" + description = "testing-acc" + name = "test-acc-stor-snapshot-%d" + collocated = true + volume_name = "${opc_compute_storage_volume.foo.name}" } // Create storage volume from snapshot diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown index f7ba88e06..13ba265a5 100644 --- a/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown @@ -18,6 +18,7 @@ resource "opc_compute_storage_volume_snapshot" "test" { description = "Description for the Storage Volume" tags = ["bar", "foo"] collocated = true + volume_name = "${opc_compute_storage_volume.foo.name}" } ``` @@ -25,7 +26,7 @@ resource "opc_compute_storage_volume_snapshot" "test" { The following arguments are supported: -* `volume` (Required) The name of the storage volume to create the snapshot from. +* `volume_name` (Required) The name of the storage volume to create the snapshot from. * `description` (Optional) The description of the storage volume snapshot. * `name` (Optional) The name of the storage volume snapshot. Will be generated if unspecified. * `parent_volume_bootable` (Optional) A string value of whether or not the parent volume is 'bootable' or not. Defaults to `"false"`. 
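With the rename in place, the parent volume is referenced through `volume_name`, and `parent_volume_bootable` now takes a real boolean instead of a string. A sketch reflecting both changes (names illustrative):

```
resource "opc_compute_storage_volume_snapshot" "boot" {
  name                   = "boot-snapshot"
  volume_name            = "${opc_compute_storage_volume.boot.name}"
  parent_volume_bootable = true
}
```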
diff --git a/website/source/layouts/opc.erb b/website/source/layouts/opc.erb index f27a27e8b..0bc3e3ab4 100644 --- a/website/source/layouts/opc.erb +++ b/website/source/layouts/opc.erb @@ -85,6 +85,9 @@ > opc_compute_storage_volume + > + opc_compute_storage_volume_snapshot + > opc_compute_vnic_set From bff6f5d6091869dc49cee9183a6901754ab64ab0 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Thu, 6 Apr 2017 12:31:33 -0400 Subject: [PATCH 028/342] Remove unnecessary nil check in return --- builtin/providers/opc/resource_storage_volume_snapshot.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/builtin/providers/opc/resource_storage_volume_snapshot.go b/builtin/providers/opc/resource_storage_volume_snapshot.go index 5fb0d9847..4612c9f95 100644 --- a/builtin/providers/opc/resource_storage_volume_snapshot.go +++ b/builtin/providers/opc/resource_storage_volume_snapshot.go @@ -178,11 +178,6 @@ func resourceOPCStorageVolumeSnapshotRead(d *schema.ResourceData, meta interface return fmt.Errorf("Error reading storage volume snapshot '%s': %v", name, err) } - if result == nil { - d.SetId("") - return nil - } - d.Set("volume_name", result.Volume) d.Set("description", result.Description) d.Set("name", result.Name) From fb89a010af3072c54b108add7627334b51784fb0 Mon Sep 17 00:00:00 2001 From: = Date: Thu, 6 Apr 2017 10:46:14 -0600 Subject: [PATCH 029/342] Adding ip address associations --- .../opc/import_ip_address_association_test.go | 33 +++ builtin/providers/opc/provider.go | 1 + .../opc/resource_ip_address_association.go | 151 +++++++++++ .../resource_ip_address_association_test.go | 158 +++++++++++ .../compute/ip_address_associations.go | 152 +++++++++++ .../compute/storage_volume_snapshots.go | 250 ++++++++++++++++++ .../compute/storage_volumes.go | 1 + vendor/vendor.json | 6 +- ...mpute_ip_address_association.html.markdown | 48 ++++ 9 files changed, 797 insertions(+), 3 deletions(-) create mode 100644 builtin/providers/opc/import_ip_address_association_test.go create mode 100644 builtin/providers/opc/resource_ip_address_association.go create mode 100644 builtin/providers/opc/resource_ip_address_association_test.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go create mode 100644 vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go create mode 100644 website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown diff --git a/builtin/providers/opc/import_ip_address_association_test.go b/builtin/providers/opc/import_ip_address_association_test.go new file mode 100644 index 000000000..7730e6cdd --- /dev/null +++ b/builtin/providers/opc/import_ip_address_association_test.go @@ -0,0 +1,33 @@ +package opc + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCIPAddressAssociation_importBasic(t *testing.T) { + resourceName := "opc_compute_ip_address_association.test" + + ri := acctest.RandInt() + config := testAccIPAddressAssociationBasic(ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIPAddressAssociationDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/provider.go b/builtin/providers/opc/provider.go index e1ac86dee..364c12db0 100644 --- 
a/builtin/providers/opc/provider.go +++ b/builtin/providers/opc/provider.go @@ -71,6 +71,7 @@ func Provider() terraform.ResourceProvider { "opc_compute_vnic_set": resourceOPCVNICSet(), "opc_compute_security_protocol": resourceOPCSecurityProtocol(), "opc_compute_ip_address_prefix_set": resourceOPCIPAddressPrefixSet(), + "opc_compute_ip_address_association": resourceOPCIPAddressAssociation(), }, ConfigureFunc: providerConfigure, diff --git a/builtin/providers/opc/resource_ip_address_association.go b/builtin/providers/opc/resource_ip_address_association.go new file mode 100644 index 000000000..7659fbf34 --- /dev/null +++ b/builtin/providers/opc/resource_ip_address_association.go @@ -0,0 +1,151 @@ +package opc + +import ( + "fmt" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceOPCIPAddressAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceOPCIPAddressAssociationCreate, + Read: resourceOPCIPAddressAssociationRead, + Update: resourceOPCIPAddressAssociationUpdate, + Delete: resourceOPCIPAddressAssociationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "ip_address_reservation": { + Type: schema.TypeString, + Optional: true, + }, + "vnic": { + Type: schema.TypeString, + Optional: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "tags": tagsOptionalSchema(), + "uri": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceOPCIPAddressAssociationCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).IPAddressAssociations() + + input := compute.CreateIPAddressAssociationInput{ + Name: d.Get("name").(string), + } + + if ipAddressReservation, ok := d.GetOk("ip_address_reservation"); ok { + input.IPAddressReservation = ipAddressReservation.(string) + } + + if vnic, ok := d.GetOk("vnic"); ok { + input.Vnic = vnic.(string) + } + + tags := getStringList(d, "tags") + if len(tags) != 0 { + input.Tags = tags + } + + if description, ok := d.GetOk("description"); ok { + input.Description = description.(string) + } + + info, err := client.CreateIPAddressAssociation(&input) + if err != nil { + return fmt.Errorf("Error creating IP Address Association: %s", err) + } + + d.SetId(info.Name) + return resourceOPCIPAddressAssociationRead(d, meta) +} + +func resourceOPCIPAddressAssociationRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).IPAddressAssociations() + + getInput := compute.GetIPAddressAssociationInput{ + Name: d.Id(), + } + result, err := client.GetIPAddressAssociation(&getInput) + if err != nil { + // IP Address Association does not exist + if compute.WasNotFoundError(err) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading IP Address Association %s: %s", d.Id(), err) + } + + d.Set("name", result.Name) + d.Set("ip_address_reservation", result.IPAddressReservation) + d.Set("vnic", result.Vnic) + d.Set("description", result.Description) + d.Set("uri", result.Uri) + if err := setStringList(d, "tags", result.Tags); err != nil { + return err + } + return nil +} + +func resourceOPCIPAddressAssociationUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).IPAddressAssociations() + + input := compute.UpdateIPAddressAssociationInput{ + Name: d.Get("name").(string), + } + + 
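+	// Optional attributes: d.GetOk reports ok=false for fields left unset in
+	// the configuration, so only user-specified values are copied into the
+	// update request built below.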
if ipAddressReservation, ok := d.GetOk("ip_address_reservation"); ok { + input.IPAddressReservation = ipAddressReservation.(string) + } + + if vnic, ok := d.GetOk("vnic"); ok { + input.Vnic = vnic.(string) + } + + tags := getStringList(d, "tags") + if len(tags) != 0 { + input.Tags = tags + } + + if description, ok := d.GetOk("description"); ok { + input.Description = description.(string) + } + + info, err := client.UpdateIPAddressAssociation(&input) + if err != nil { + return fmt.Errorf("Error updating IP Address Association: %s", err) + } + + d.SetId(info.Name) + return resourceOPCIPAddressAssociationRead(d, meta) +} + +func resourceOPCIPAddressAssociationDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*compute.Client).IPAddressAssociations() + name := d.Id() + + input := compute.DeleteIPAddressAssociationInput{ + Name: name, + } + if err := client.DeleteIPAddressAssociation(&input); err != nil { + return fmt.Errorf("Error deleting IP Address Association: %s", err) + } + return nil +} diff --git a/builtin/providers/opc/resource_ip_address_association_test.go b/builtin/providers/opc/resource_ip_address_association_test.go new file mode 100644 index 000000000..9c3804d1e --- /dev/null +++ b/builtin/providers/opc/resource_ip_address_association_test.go @@ -0,0 +1,158 @@ +package opc + +import ( + "fmt" + "testing" + + "github.com/hashicorp/go-oracle-terraform/compute" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccOPCIPAddressAssociation_Basic(t *testing.T) { + rInt := acctest.RandInt() + resourceName := "opc_compute_ip_address_association.test" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIPAddressAssociationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccIPAddressAssociationBasic(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckIPAddressAssociationExists, + resource.TestCheckResourceAttr( + resourceName, "tags.#", "2"), + ), + }, + { + Config: testAccIPAddressAssociationBasic_Update(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + resourceName, "tags.#", "1"), + ), + }, + }, + }) +} + +func TestAccOPCIPAddressAssociation_Full(t *testing.T) { + rInt := acctest.RandInt() + resourceName := "opc_compute_ip_address_association.test" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIPAddressAssociationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccIPAddressAssociationFull(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckIPAddressAssociationExists, + resource.TestCheckResourceAttr( + resourceName, "vnic", fmt.Sprintf("test-vnic-data-%d", rInt)), + resource.TestCheckResourceAttr( + resourceName, "ip_address_reservation", fmt.Sprintf("testing-ip-address-association-%d", rInt)), + ), + }, + }, + }) +} + +func testAccCheckIPAddressAssociationExists(s *terraform.State) error { + client := testAccProvider.Meta().(*compute.Client).IPAddressAssociations() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "opc_compute_ip_address_association" { + continue + } + + input := compute.GetIPAddressAssociationInput{ + Name: rs.Primary.Attributes["name"], + } + if _, err := client.GetIPAddressAssociation(&input); err != nil { + return fmt.Errorf("Error retrieving state of IP Address Association 
%s: %s", input.Name, err) + } + } + + return nil +} + +func testAccCheckIPAddressAssociationDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*compute.Client).IPAddressAssociations() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "opc_compute_ip_address_association" { + continue + } + + input := compute.GetIPAddressAssociationInput{ + Name: rs.Primary.Attributes["name"], + } + if info, err := client.GetIPAddressAssociation(&input); err == nil { + return fmt.Errorf("IP Address Association %s still exists: %#v", input.Name, info) + } + } + + return nil +} + +func testAccIPAddressAssociationBasic(rInt int) string { + return fmt.Sprintf(` +resource "opc_compute_ip_address_association" "test" { + name = "testing-acc-%d" + description = "acctesting ip address association test %d" + tags = ["tag1", "tag2"] +}`, rInt, rInt) +} + +func testAccIPAddressAssociationBasic_Update(rInt int) string { + return fmt.Sprintf(` +resource "opc_compute_ip_address_association" "test" { + name = "testing-acc-%d" + description = "acctesting ip address association test updated %d" + tags = ["tag1"] +}`, rInt, rInt) +} + +func testAccIPAddressAssociationFull(rInt int) string { + return fmt.Sprintf(` +resource "opc_compute_ip_network" "foo" { + name = "testing-vnic-data-%d" + description = "testing-ip-address-association" + ip_address_prefix = "10.1.13.0/24" +} +resource "opc_compute_instance" "test" { + name = "test-%d" + label = "test" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + networking_info { + index = 0 + ip_network = "${opc_compute_ip_network.foo.id}" + vnic = "test-vnic-data-%d" + shared_network = false + mac_address = "02:5a:cd:ec:2e:4c" + } +} +data "opc_compute_network_interface" "eth0" { + instance_name = "${opc_compute_instance.test.name}" + instance_id = "${opc_compute_instance.test.id}" + interface = "eth0" +} +data "opc_compute_vnic" "foo" { + name = "${data.opc_compute_network_interface.eth0.vnic}" +} +resource "opc_compute_ip_address_reservation" "test" { + name = "testing-ip-address-association-%d" + description = "testing-desc-%d" + ip_address_pool = "public-ippool" +} +resource "opc_compute_ip_address_association" "test" { + name = "testing-acc-%d" + ip_address_reservation = "${opc_compute_ip_address_reservation.test.name}" + vnic = "${data.opc_compute_vnic.foo.name}" + description = "acctesting ip address association test %d" + tags = ["tag1", "tag2"] +}`, rInt, rInt, rInt, rInt, rInt, rInt, rInt) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go new file mode 100644 index 000000000..335ada558 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ip_address_associations.go @@ -0,0 +1,152 @@ +package compute + +const ( + IPAddressAssociationDescription = "ip address association" + IPAddressAssociationContainerPath = "/network/v1/ipassociation/" + IPAddressAssociationResourcePath = "/network/v1/ipassociation" +) + +type IPAddressAssociationsClient struct { + ResourceClient +} + +// IPAddressAssociations() returns an IPAddressAssociationsClient that can be used to access the +// necessary CRUD functions for IP Address Associations. 
+func (c *Client) IPAddressAssociations() *IPAddressAssociationsClient { + return &IPAddressAssociationsClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: IPAddressAssociationDescription, + ContainerPath: IPAddressAssociationContainerPath, + ResourceRootPath: IPAddressAssociationResourcePath, + }, + } +} + +// IPAddressAssociationInfo contains the exported fields necessary to hold all the information about an +// IP Address Association +type IPAddressAssociationInfo struct { + // The name of the NAT IP address reservation. + IPAddressReservation string `json:"ipAddressReservation"` + // Name of the virtual NIC associated with this NAT IP reservation. + Vnic string `json:"vnic"` + // The name of the IP Address Association + Name string `json:"name"` + // Description of the IP Address Association + Description string `json:"description"` + // Slice of tags associated with the IP Address Association + Tags []string `json:"tags"` + // Uniform Resource Identifier for the IP Address Association + Uri string `json:"uri"` +} + +type CreateIPAddressAssociationInput struct { + // The name of the IP Address Association to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // The name of the NAT IP address reservation. + // Optional + IPAddressReservation string `json:"ipAddressReservation,omitempty"` + + // Name of the virtual NIC associated with this NAT IP reservation. + // Optional + Vnic string `json:"vnic,omitempty"` + + // Description of the IPAddressAssociation + // Optional + Description string `json:"description"` + + // String slice of tags to apply to the IP Address Association object + // Optional + Tags []string `json:"tags"` +} + +// Create a new IP Address Association from an IPAddressAssociationsClient and an input struct. +// Returns a populated Info struct for the IP Address Association, and any errors +func (c *IPAddressAssociationsClient) CreateIPAddressAssociation(input *CreateIPAddressAssociationInput) (*IPAddressAssociationInfo, error) { + input.Name = c.getQualifiedName(input.Name) + input.IPAddressReservation = c.getQualifiedName(input.IPAddressReservation) + input.Vnic = c.getQualifiedName(input.Vnic) + + var ipInfo IPAddressAssociationInfo + if err := c.createResource(&input, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type GetIPAddressAssociationInput struct { + // The name of the IP Address Association to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +// Returns a populated IPAddressAssociationInfo struct from an input struct +func (c *IPAddressAssociationsClient) GetIPAddressAssociation(input *GetIPAddressAssociationInput) (*IPAddressAssociationInfo, error) { + input.Name = c.getQualifiedName(input.Name) + + var ipInfo IPAddressAssociationInfo + if err := c.getResource(input.Name, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +// UpdateIPAddressAssociationInput defines what to update in a ip address association +type UpdateIPAddressAssociationInput struct { + // The name of the IP Address Association to create. Object names can only contain alphanumeric, + // underscore, dash, and period characters. Names are case-sensitive. + // Required + Name string `json:"name"` + + // The name of the NAT IP address reservation. 
+ // Optional + IPAddressReservation string `json:"ipAddressReservation,omitempty"` + + // Name of the virtual NIC associated with this NAT IP reservation. + // Optional + Vnic string `json:"vnic,omitempty"` + + // Description of the IPAddressAssociation + // Optional + Description string `json:"description"` + + // String slice of tags to apply to the IP Address Association object + // Optional + Tags []string `json:"tags"` +} + +// UpdateIPAddressAssociation update the ip address association +func (c *IPAddressAssociationsClient) UpdateIPAddressAssociation(updateInput *UpdateIPAddressAssociationInput) (*IPAddressAssociationInfo, error) { + updateInput.Name = c.getQualifiedName(updateInput.Name) + updateInput.IPAddressReservation = c.getQualifiedName(updateInput.IPAddressReservation) + updateInput.Vnic = c.getQualifiedName(updateInput.Vnic) + var ipInfo IPAddressAssociationInfo + if err := c.updateResource(updateInput.Name, updateInput, &ipInfo); err != nil { + return nil, err + } + + return c.success(&ipInfo) +} + +type DeleteIPAddressAssociationInput struct { + // The name of the IP Address Association to query for. Case-sensitive + // Required + Name string `json:"name"` +} + +func (c *IPAddressAssociationsClient) DeleteIPAddressAssociation(input *DeleteIPAddressAssociationInput) error { + return c.deleteResource(input.Name) +} + +// Unqualifies any qualified fields in the IPAddressAssociationInfo struct +func (c *IPAddressAssociationsClient) success(info *IPAddressAssociationInfo) (*IPAddressAssociationInfo, error) { + c.unqualify(&info.Name) + c.unqualify(&info.Vnic) + c.unqualify(&info.IPAddressReservation) + return info, nil +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go new file mode 100644 index 000000000..f958d2107 --- /dev/null +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_snapshots.go @@ -0,0 +1,250 @@ +package compute + +import ( + "fmt" + "strings" +) + +const ( + StorageVolumeSnapshotDescription = "storage volume snapshot" + StorageVolumeSnapshotContainerPath = "/storage/snapshot/" + StorageVolumeSnapshotResourcePath = "/storage/snapshot" + + WaitForSnapshotCreateTimeout = 1200 + WaitForSnapshotDeleteTimeout = 1500 + + // Collocated Snapshot Property + SnapshotPropertyCollocated = "/oracle/private/storage/snapshot/collocated" +) + +// StorageVolumeSnapshotClient is a client for the Storage Volume Snapshot functions of the Compute API. 
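+// Snapshot creation and deletion are asynchronous on the API side, so the
+// create and delete helpers below poll via waitFor until the snapshot
+// reaches a terminal state or the timeout elapses.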
+type StorageVolumeSnapshotClient struct { + ResourceClient +} + +func (c *Client) StorageVolumeSnapshots() *StorageVolumeSnapshotClient { + return &StorageVolumeSnapshotClient{ + ResourceClient: ResourceClient{ + Client: c, + ResourceDescription: StorageVolumeSnapshotDescription, + ContainerPath: StorageVolumeSnapshotContainerPath, + ResourceRootPath: StorageVolumeSnapshotResourcePath, + }, + } +} + +// StorageVolumeSnapshotInfo represents the information retrieved from the service about a storage volume snapshot +type StorageVolumeSnapshotInfo struct { + // Account to use for snapshots + Account string `json:"account"` + + // Description of the snapshot + Description string `json:"description"` + + // The name of the machine image that's used in the boot volume from which this snapshot is taken + MachineImageName string `json:"machineimage_name"` + + // Name of the snapshot + Name string `json:"name"` + + // String indicating whether the parent volume is bootable or not + ParentVolumeBootable string `json:"parent_volume_bootable"` + + // Platform the snapshot is compatible with + Platform string `json:"platform"` + + // String determining whether the snapshot is remote or collocated + Property string `json:"property"` + + // The size of the snapshot in GB + Size string `json:"size"` + + // The ID of the snapshot. Generated by the server + SnapshotID string `json:"snapshot_id"` + + // The timestamp of the storage snapshot + SnapshotTimestamp string `json:"snapshot_timestamp"` + + // Timestamp for when the operation started + StartTimestamp string `json:"start_timestamp"` + + // Status of the snapshot + Status string `json:"status"` + + // Status Detail of the storage snapshot + StatusDetail string `json:"status_detail"` + + // Indicates the time that the current view of the storage volume snapshot was generated. + StatusTimestamp string `json:"status_timestamp"` + + // Array of tags for the snapshot + Tags []string `json:"tags,omitempty"` + + // Uniform Resource Identifier + URI string `json:"uri"` + + // Name of the parent storage volume for the snapshot + Volume string `json:"volume"` +} + +// CreateStorageVolumeSnapshotInput represents the body of an API request to create a new storage volume snapshot +type CreateStorageVolumeSnapshotInput struct { + // Description of the snapshot + // Optional + Description string `json:"description,omitempty"` + + // Name of the snapshot + // Optional, will be generated if not specified + Name string `json:"name,omitempty"` + + // Whether or not the parent volume is bootable + // Optional + ParentVolumeBootable string `json:"parent_volume_bootable,omitempty"` + + // Whether collocated or remote + // Optional, will be remote if unspecified + Property string `json:"property,omitempty"` + + // Array of tags for the snapshot + // Optional + Tags []string `json:"tags,omitempty"` + + // Name of the volume to create the snapshot from + // Required + Volume string `json:"volume"` + + // Timeout (in seconds) to wait for snapshot to be completed. 
Will use default if unspecified + Timeout int +} + +// CreateStorageVolumeSnapshot creates a snapshot based on the supplied information struct +func (c *StorageVolumeSnapshotClient) CreateStorageVolumeSnapshot(input *CreateStorageVolumeSnapshotInput) (*StorageVolumeSnapshotInfo, error) { + if input.Name != "" { + input.Name = c.getQualifiedName(input.Name) + } + input.Volume = c.getQualifiedName(input.Volume) + + var storageSnapshotInfo StorageVolumeSnapshotInfo + if err := c.createResource(&input, &storageSnapshotInfo); err != nil { + return nil, err + } + + timeout := WaitForSnapshotCreateTimeout + if input.Timeout != 0 { + timeout = input.Timeout + } + + // The name of the snapshot could have been generated. Use the response name as input + return c.waitForStorageSnapshotAvailable(storageSnapshotInfo.Name, timeout) +} + +// GetStorageVolumeSnapshotInput represents the body of an API request to get information on a storage volume snapshot +type GetStorageVolumeSnapshotInput struct { + // Name of the snapshot + Name string `json:"name"` +} + +// GetStorageVolumeSnapshot makes an API request to populate information on a storage volume snapshot +func (c *StorageVolumeSnapshotClient) GetStorageVolumeSnapshot(input *GetStorageVolumeSnapshotInput) (*StorageVolumeSnapshotInfo, error) { + var storageSnapshot StorageVolumeSnapshotInfo + input.Name = c.getQualifiedName(input.Name) + if err := c.getResource(input.Name, &storageSnapshot); err != nil { + if WasNotFoundError(err) { + return nil, nil + } + + return nil, err + } + return c.success(&storageSnapshot) +} + +// DeleteStorageVolumeSnapshotInput represents the body of an API request to delete a storage volume snapshot +type DeleteStorageVolumeSnapshotInput struct { + // Name of the snapshot to delete + Name string `json:"name"` + + // Timeout in seconds to wait for deletion, will use default if unspecified + Timeout int +} + +// DeleteStoragevolumeSnapshot makes an API request to delete a storage volume snapshot +func (c *StorageVolumeSnapshotClient) DeleteStorageVolumeSnapshot(input *DeleteStorageVolumeSnapshotInput) error { + input.Name = c.getQualifiedName(input.Name) + + if err := c.deleteResource(input.Name); err != nil { + return err + } + + timeout := WaitForSnapshotDeleteTimeout + if input.Timeout != 0 { + timeout = input.Timeout + } + + return c.waitForStorageSnapshotDeleted(input.Name, timeout) +} + +func (c *StorageVolumeSnapshotClient) success(result *StorageVolumeSnapshotInfo) (*StorageVolumeSnapshotInfo, error) { + c.unqualify(&result.Name) + c.unqualify(&result.Volume) + + sizeInGigaBytes, err := sizeInGigaBytes(result.Size) + if err != nil { + return nil, err + } + result.Size = sizeInGigaBytes + + return result, nil +} + +// Waits for a storage snapshot to become available +func (c *StorageVolumeSnapshotClient) waitForStorageSnapshotAvailable(name string, timeout int) (*StorageVolumeSnapshotInfo, error) { + var result *StorageVolumeSnapshotInfo + + err := c.waitFor( + fmt.Sprintf("storage volume snapshot %s to become available", c.getQualifiedName(name)), + timeout, + func() (bool, error) { + req := &GetStorageVolumeSnapshotInput{ + Name: name, + } + res, err := c.GetStorageVolumeSnapshot(req) + if err != nil { + return false, err + } + + if res != nil { + result = res + if strings.ToLower(result.Status) == "completed" { + return true, nil + } else if strings.ToLower(result.Status) == "error" { + return false, fmt.Errorf("Snapshot '%s' failed to create successfully. 
Status: %s Status Detail: %s", result.Name, result.Status, result.StatusDetail) + } + } + + return false, nil + }) + + return result, err +} + +// Waits for a storage snapshot to be deleted +func (c *StorageVolumeSnapshotClient) waitForStorageSnapshotDeleted(name string, timeout int) error { + return c.waitFor( + fmt.Sprintf("storage volume snapshot %s to be deleted", c.getQualifiedName(name)), + timeout, + func() (bool, error) { + req := &GetStorageVolumeSnapshotInput{ + Name: name, + } + res, err := c.GetStorageVolumeSnapshot(req) + if res == nil { + return true, nil + } + + if err != nil { + return false, err + } + + return res == nil, nil + }) +} diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go index c40c354a9..7193a6c24 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go @@ -188,6 +188,7 @@ type GetStorageVolumeInput struct { func (c *StorageVolumeClient) success(result *StorageVolumeInfo) (*StorageVolumeInfo, error) { c.unqualify(&result.Name) + c.unqualify(&result.Snapshot) sizeInMegaBytes, err := sizeInGigaBytes(result.Size) if err != nil { diff --git a/vendor/vendor.json b/vendor/vendor.json index 63c77ed33..0f8bd50ef 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1970,10 +1970,10 @@ "revision": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" }, { - "checksumSHA1": "QKusHEboSl00AnORqkjv0gZEhqw=", + "checksumSHA1": "mVLpbxsm+8TlXKgkezrh3c5I7+4=", "path": "github.com/hashicorp/go-oracle-terraform/compute", - "revision": "15f277fb824b7af18c6bef8d30d84174154f989b", - "revisionTime": "2017-04-05T20:02:51Z" + "revision": "381402af3554bcca5fa7eeda94d47003e6ba7ee7", + "revisionTime": "2017-04-06T04:33:22Z" }, { "checksumSHA1": "DzK7lYwHt5Isq5Zf73cnQqBO2LI=", diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown new file mode 100644 index 000000000..44429a030 --- /dev/null +++ b/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown @@ -0,0 +1,48 @@ +--- +layout: "opc" +page_title: "Oracle: opc_compute_ip_address_association" +sidebar_current: "docs-opc-resource-ip-address-association" +description: |- + Creates and manages an IP address association in an OPC identity domain. +--- + +# opc\_compute\_ip\_address\_association + +The ``opc_compute_ip_address_association`` resource creates and manages an IP address association in an OPC identity domain. + +## Example Usage + +``` +resource "opc_compute_ip_address_association" "default" { + name = "PrefixSet1" + ip_address_reservation = "${opc_compute_ip_address_reservation.default.name}" + vnic = "${data.opc_compute_vnic.default.name}" + tags = ["tags1", "tags2"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the ip address association. + +* `ip_address_reservation` - (Optional) The name of the NAT IP address reservation. + +* `vnic` - (Optional) The name of the virtual NIC associated with this NAT IP reservation. + +* `description` - (Optional) A description of the ip address association. + +* `tags` - (Optional) List of tags that may be applied to the ip address association. 
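
The arguments above correspond directly to the input structs on the vendored client shown earlier. As a usage aside, here is a hedged sketch of driving that client's create/read/delete cycle outside Terraform (it assumes an already-configured `*compute.Client`; all names passed in are placeholders):

```
package main

import (
	"fmt"

	"github.com/hashicorp/go-oracle-terraform/compute"
)

// exerciseAssociation walks the create/read/delete cycle the Terraform
// resource implements; it is a sketch, not part of the provider or the SDK.
func exerciseAssociation(c *compute.Client) error {
	client := c.IPAddressAssociations()

	created, err := client.CreateIPAddressAssociation(&compute.CreateIPAddressAssociationInput{
		Name:                 "example-association", // placeholder
		IPAddressReservation: "example-reservation", // optional
		Vnic:                 "example-vnic",        // optional
	})
	if err != nil {
		return fmt.Errorf("Error creating IP Address Association: %s", err)
	}

	info, err := client.GetIPAddressAssociation(&compute.GetIPAddressAssociationInput{Name: created.Name})
	if err != nil {
		return fmt.Errorf("Error reading IP Address Association: %s", err)
	}
	fmt.Printf("associated %s -> %s\n", info.IPAddressReservation, info.Vnic)

	return client.DeleteIPAddressAssociation(&compute.DeleteIPAddressAssociationInput{Name: created.Name})
}

func main() {
	// Obtaining a configured *compute.Client requires real credentials; see
	// the provider's Config.Client for how one is constructed.
}
```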
+
+In addition to the above, the following variables are exported:
+
+* `uri` - (Computed) The Uniform Resource Identifier of the ip address association.
+
+## Import
+
+IP Address Associations can be imported using the `resource name`, e.g.
+
+```
+terraform import opc_compute_ip_address_association.default example
+```

From c51fb79bf370cefc513cbcb81861145202eda1e0 Mon Sep 17 00:00:00 2001
From: Jake Champlin
Date: Thu, 6 Apr 2017 13:05:52 -0400
Subject: [PATCH 030/342] provider/opc: Remove 'model' from instance networking

Removes `model` as a configurable attribute in instance networking.
Also adds the missing `name` attribute to the `ip_reservation` docs.

```
$ make testacc TEST=./builtin/providers/opc TESTARGS="-run=TestAccOPCInstance_ipNetwork"
==> Checking that code complies with gofmt requirements...
go generate $(go list ./... | grep -v /terraform/vendor/)
2017/04/06 12:53:13 Generated command/internal_plugin_list.go
TF_ACC=1 go test ./builtin/providers/opc -v -run=TestAccOPCInstance_ipNetwork -timeout 120m
=== RUN   TestAccOPCInstance_ipNetwork
--- PASS: TestAccOPCInstance_ipNetwork (258.69s)
PASS
ok      github.com/hashicorp/terraform/builtin/providers/opc   258.721s
```

```
$ make testacc TEST=./builtin/providers/opc TESTARGS="-run=TestAccOPCInstance_sharedNetworking"
==> Checking that code complies with gofmt requirements...
go generate $(go list ./... | grep -v /terraform/vendor/)
2017/04/06 12:58:43 Generated command/internal_plugin_list.go
TF_ACC=1 go test ./builtin/providers/opc -v -run=TestAccOPCInstance_sharedNetworking -timeout 120m
=== RUN   TestAccOPCInstance_sharedNetworking
--- PASS: TestAccOPCInstance_sharedNetworking (253.15s)
PASS
ok      github.com/hashicorp/terraform/builtin/providers/opc   253.180s
```
---
 builtin/providers/opc/resource_instance.go | 29 ++-----------------
 .../providers/opc/resource_instance_test.go | 2 --
 .../opc_compute_ip_reservation.html.markdown | 2 ++
 3 files changed, 4 insertions(+), 29 deletions(-)

diff --git a/builtin/providers/opc/resource_instance.go b/builtin/providers/opc/resource_instance.go
index 8157835ad..a2840829e 100644
--- a/builtin/providers/opc/resource_instance.go
+++ b/builtin/providers/opc/resource_instance.go
@@ -128,20 +128,6 @@ func resourceInstance() *schema.Resource {
 			Optional: true,
 		},
 
-		"model": {
-			// Required, Shared Network only.
- Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "e1000" { - errors = append(errors, fmt.Errorf("Model needs to be set to 'e1000', got: %s", value)) - } - return - }, - }, - "name_servers": { // Optional, IP Network + Shared Network Type: schema.TypeList, @@ -205,7 +191,6 @@ func resourceInstance() *schema.Resource { buf.WriteString(fmt.Sprintf("%d-", m["index"].(int))) buf.WriteString(fmt.Sprintf("%s-", m["vnic"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["nat"])) - buf.WriteString(fmt.Sprintf("%s-", m["model"].(string))) return hashcode.String(buf.String()) }, }, @@ -618,6 +603,8 @@ func readNetworkInterfacesFromConfig(d *schema.ResourceData) (map[string]compute if ni["shared_network"].(bool) { // Populate shared network parameters info, err = readSharedNetworkFromConfig(ni) + // Set 'model' since we're configuring a shared network interface + info.Model = compute.NICDefaultModel } else { // Populate IP Network Parameters info, err = readIPNetworkFromConfig(ni) @@ -703,7 +690,6 @@ func readSharedNetworkFromConfig(ni map[string]interface{}) (compute.NetworkingI // function based off of multiple fields in the supplied schema. func validateSharedNetwork(ni map[string]interface{}) error { // A Shared Networking Interface MUST have the following attributes set: - // - "model" // - "nat" // The following attributes _cannot_ be set for a shared network: // - "ip_address" @@ -711,9 +697,6 @@ func validateSharedNetwork(ni map[string]interface{}) error { // - "mac_address" // - "vnic" // - "vnic_sets" - if d, ok := ni["model"]; !ok || d.(string) == "" { - return fmt.Errorf("'model' field needs to be set for a Shared Networking Interface") - } if _, ok := ni["nat"]; !ok { return fmt.Errorf("'nat' field needs to be set for a Shared Networking Interface") @@ -820,19 +803,12 @@ func readIPNetworkFromConfig(ni map[string]interface{}) (compute.NetworkingInfo, func validateIPNetwork(ni map[string]interface{}) error { // An IP Networking Interface MUST have the following attributes set: // - "ip_network" - // The following attributes _cannot_ be set for an IP Network: - // - "model" // Required to be set if d, ok := ni["ip_network"]; !ok || d.(string) == "" { return fmt.Errorf("'ip_network' field is required for an IP Network interface") } - // Requird to be unset - if d, ok := ni["model"]; ok && d.(string) != "" { - return fmt.Errorf("'model' cannot be set in an IP Network Interface") - } - return nil } @@ -872,7 +848,6 @@ func readNetworkInterfaces(d *schema.ResourceData, ifaces map[string]compute.Net res["mac_address"] = iface.MACAddress } if iface.Model != "" { - res["model"] = iface.Model // Model can only be set on Shared networks res["shared_network"] = true } diff --git a/builtin/providers/opc/resource_instance_test.go b/builtin/providers/opc/resource_instance_test.go index 36b214db4..3c4e0ffe6 100644 --- a/builtin/providers/opc/resource_instance_test.go +++ b/builtin/providers/opc/resource_instance_test.go @@ -67,7 +67,6 @@ func TestAccOPCInstance_sharedNetworking(t *testing.T) { // Check Data Source to validate networking attributes resource.TestCheckResourceAttr(dataName, "shared_network", "true"), resource.TestCheckResourceAttr(dataName, "nat.#", "1"), - resource.TestCheckResourceAttr(dataName, "model", "e1000"), resource.TestCheckResourceAttr(dataName, "sec_lists.#", "1"), resource.TestCheckResourceAttr(dataName, "name_servers.#", "0"), 
resource.TestCheckResourceAttr(dataName, "vnic_sets.#", "0"), @@ -205,7 +204,6 @@ resource "opc_compute_instance" "test" { tags = ["tag1", "tag2"] networking_info { index = 0 - model = "e1000" nat = ["ippool:/oracle/public/ippool"] shared_network = true } diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown index 1201fe9cc..faf2184aa 100644 --- a/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown @@ -30,6 +30,8 @@ The following arguments are supported: (if true), or may be returned to the pool and replaced with a different IP address when an instance is restarted, or deleted and recreated (if false). +* `name` - (Optional) Name of the IP Reservation. Will be generated if unspecified. + * `tags` - (Optional) List of tags that may be applied to the IP reservation. ## Import From 3433850c9c92efe3a8b575e7cc33c4bd5e0987c3 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Thu, 6 Apr 2017 13:59:16 -0400 Subject: [PATCH 031/342] provider/opc: Add debug logging Adds debug logging, fixes a failing instance acceptance test, and updates vendored dep --- builtin/providers/opc/config.go | 24 +++++++++++++++++-- builtin/providers/opc/provider.go | 1 + .../providers/opc/resource_instance_test.go | 2 +- .../go-oracle-terraform/compute/client.go | 2 ++ .../go-oracle-terraform/compute/logging.go | 2 +- .../compute/storage_volumes.go | 1 + vendor/vendor.json | 6 ++--- 7 files changed, 31 insertions(+), 7 deletions(-) diff --git a/builtin/providers/opc/config.go b/builtin/providers/opc/config.go index 5c473e591..b25167fe9 100644 --- a/builtin/providers/opc/config.go +++ b/builtin/providers/opc/config.go @@ -2,11 +2,14 @@ package opc import ( "fmt" - "net/http" + "log" "net/url" + "strings" + "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/go-oracle-terraform/opc" + "github.com/hashicorp/terraform/helper/logging" ) type Config struct { @@ -33,9 +36,26 @@ func (c *Config) Client() (*compute.Client, error) { Username: &c.User, Password: &c.Password, APIEndpoint: u, - HTTPClient: http.DefaultClient, + HTTPClient: cleanhttp.DefaultClient(), + } + + if logging.IsDebugOrHigher() { + config.LogLevel = opc.LogDebug + config.Logger = opcLogger{} } // TODO: http client wrapping / log level return compute.NewComputeClient(&config) } + +type opcLogger struct{} + +func (l opcLogger) Log(args ...interface{}) { + tokens := make([]string, 0, len(args)) + for _, arg := range args { + if token, ok := arg.(string); ok { + tokens = append(tokens, token) + } + } + log.Printf("[DEBUG] [go-oracle-terraform]: %s", strings.Join(tokens, " ")) +} diff --git a/builtin/providers/opc/provider.go b/builtin/providers/opc/provider.go index 579d5ac7b..ddde50d5a 100644 --- a/builtin/providers/opc/provider.go +++ b/builtin/providers/opc/provider.go @@ -36,6 +36,7 @@ func Provider() terraform.ResourceProvider { Description: "The HTTP endpoint for OPC API operations.", }, + // TODO Actually implement this "max_retry_timeout": { Type: schema.TypeInt, Optional: true, diff --git a/builtin/providers/opc/resource_instance_test.go b/builtin/providers/opc/resource_instance_test.go index 3c4e0ffe6..2cc61eb39 100644 --- a/builtin/providers/opc/resource_instance_test.go +++ b/builtin/providers/opc/resource_instance_test.go @@ -188,7 
+188,7 @@ resource "opc_compute_instance" "test" { image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" instance_attributes = < Date: Thu, 6 Apr 2017 16:28:50 -0600 Subject: [PATCH 034/342] final nit fix --- builtin/providers/opc/resource_ip_address_association.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/builtin/providers/opc/resource_ip_address_association.go b/builtin/providers/opc/resource_ip_address_association.go index 32ebf43d9..de7dec4db 100644 --- a/builtin/providers/opc/resource_ip_address_association.go +++ b/builtin/providers/opc/resource_ip_address_association.go @@ -79,9 +79,10 @@ func resourceOPCIPAddressAssociationCreate(d *schema.ResourceData, meta interfac func resourceOPCIPAddressAssociationRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*compute.Client).IPAddressAssociations() + name := d.Id() getInput := compute.GetIPAddressAssociationInput{ - Name: d.Id(), + name, } result, err := client.GetIPAddressAssociation(&getInput) if err != nil { @@ -90,11 +91,11 @@ func resourceOPCIPAddressAssociationRead(d *schema.ResourceData, meta interface{ d.SetId("") return nil } - return fmt.Errorf("Error reading IP Address Association %s: %s", getInput.Name, err) + return fmt.Errorf("Error reading IP Address Association %s: %s", name, err) } if result == nil { d.SetId("") - return fmt.Errorf("Error reading IP Address Association %s: %s", getInput.Name, err) + return fmt.Errorf("Error reading IP Address Association %s: %s", name, err) } d.Set("name", result.Name) From 76b224ea0a9b5b4c8e3029468aa1bb0f9f6de446 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Thu, 6 Apr 2017 20:43:42 -0400 Subject: [PATCH 035/342] Update IP Association Documentation, add sidebar --- .../opc/r/opc_compute_ip_address_association.html.markdown | 4 ++-- website/source/layouts/opc.erb | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown index b531cf559..a592c27db 100644 --- a/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown @@ -3,12 +3,12 @@ layout: "opc" page_title: "Oracle: opc_compute_ip_address_association" sidebar_current: "docs-opc-resource-ip-address-association" description: |- - Creates and manages an IP address association in an OPC identity domain. + Creates and manages an IP address association in an OPC identity domain, for an IP Network. --- # opc\_compute\_ip\_address\_association -The ``opc_compute_ip_address_association`` resource creates and manages an IP address association between an IP address reservation and a virtual NIC in an OPC identity domain. +The ``opc_compute_ip_address_association`` resource creates and manages an IP address association between an IP address reservation and a virtual NIC in an OPC identity domain, for an IP Network. 
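
Returning to the debug-logging hook from patch 031 above: it adapts the SDK's variadic `Log(args ...interface{})` callback onto Terraform's standard `log` output, gated on `logging.IsDebugOrHigher()`. A minimal sketch of the same adapter pattern (the `Logger` interface shape is assumed from that usage; non-string arguments are dropped, as the provider's `opcLogger` does):

```
package main

import (
	"log"
	"strings"
)

// Logger matches the shape the SDK consumes (assumed from the config above).
type Logger interface {
	Log(args ...interface{})
}

// prefixLogger joins string arguments and forwards them to the standard
// logger under a [DEBUG] prefix, silently dropping non-string values.
type prefixLogger struct{}

func (prefixLogger) Log(args ...interface{}) {
	tokens := make([]string, 0, len(args))
	for _, arg := range args {
		if s, ok := arg.(string); ok {
			tokens = append(tokens, s)
		}
	}
	log.Printf("[DEBUG] [go-oracle-terraform]: %s", strings.Join(tokens, " "))
}

func main() {
	var l Logger = prefixLogger{}
	l.Log("GET", "/network/v1/ipassociation/", 200) // 200 (an int) is dropped
}
```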
## Example Usage diff --git a/website/source/layouts/opc.erb b/website/source/layouts/opc.erb index 0bc3e3ab4..706acbbb5 100644 --- a/website/source/layouts/opc.erb +++ b/website/source/layouts/opc.erb @@ -37,6 +37,9 @@ > opc_compute_instance + > + opc_compute_ip_address_association + > opc_compute_ip_address_prefix_set From 2332256af635fbfed736bf55e104d75db825d131 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 6 Apr 2017 19:12:20 +0100 Subject: [PATCH 036/342] Refactoring Bootable Storage Volumes --- .../providers/opc/resource_storage_volume.go | 131 +++++++----------- .../opc/resource_storage_volume_test.go | 80 ++++++----- 2 files changed, 86 insertions(+), 125 deletions(-) diff --git a/builtin/providers/opc/resource_storage_volume.go b/builtin/providers/opc/resource_storage_volume.go index 28f3e7e05..0d101f649 100644 --- a/builtin/providers/opc/resource_storage_volume.go +++ b/builtin/providers/opc/resource_storage_volume.go @@ -42,47 +42,24 @@ func resourceOPCStorageVolume() *schema.Resource { }, true), }, - "snapshot": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "snapshot_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "snapshot_account": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "bootable": { - Type: schema.TypeList, + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "image_list": { + Type: schema.TypeString, Optional: true, ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "image_list": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, + }, - "image_list_entry": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: -1, - }, - }, - }, + "image_list_entry": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: -1, }, "tags": tagsOptionalSchema(), @@ -139,27 +116,29 @@ func resourceOPCStorageVolumeCreate(d *schema.ResourceData, meta interface{}) er description := d.Get("description").(string) size := d.Get("size").(int) storageType := d.Get("storage_type").(string) + bootable := d.Get("bootable").(bool) + imageList := d.Get("image_list").(string) + imageListEntry := d.Get("image_list_entry").(int) + + if bootable == true { + if imageList == "" { + return fmt.Errorf("Error: A Bootable Volume must have an Image List!") + } + + if imageListEntry == -1 { + return fmt.Errorf("Error: A Bootable Volume must have an Image List Entry!") + } + } input := compute.CreateStorageVolumeInput{ - Name: name, - Description: description, - Size: strconv.Itoa(size), - Properties: []string{storageType}, - Tags: getStringList(d, "tags"), - } - - expandOPCStorageVolumeOptionalFields(d, &input) - - if v, ok := d.GetOk("snapshot"); ok { - input.Snapshot = v.(string) - } - - if v, ok := d.GetOk("snapshot_account"); ok { - input.SnapshotAccount = v.(string) - } - - if v, ok := d.GetOk("snapshot_id"); ok { - input.SnapshotID = v.(string) + Name: name, + Description: description, + Size: strconv.Itoa(size), + Properties: []string{storageType}, + Bootable: bootable, + ImageList: imageList, + ImageListEntry: imageListEntry, + Tags: getStringList(d, "tags"), } info, err := client.CreateStorageVolume(&input) @@ -178,13 +157,17 @@ func resourceOPCStorageVolumeUpdate(d *schema.ResourceData, meta interface{}) er description := d.Get("description").(string) size := d.Get("size").(int) storageType := d.Get("storage_type").(string) + imageList := 
d.Get("image_list").(string) + imageListEntry := d.Get("image_list_entry").(int) input := compute.UpdateStorageVolumeInput{ - Name: name, - Description: description, - Size: strconv.Itoa(size), - Properties: []string{storageType}, - Tags: getStringList(d, "tags"), + Name: name, + Description: description, + Size: strconv.Itoa(size), + Properties: []string{storageType}, + ImageList: imageList, + ImageListEntry: imageListEntry, + Tags: getStringList(d, "tags"), } _, err := client.UpdateStorageVolume(&input) if err != nil { @@ -220,21 +203,19 @@ func resourceOPCStorageVolumeRead(d *schema.ResourceData, meta interface{}) erro d.Set("name", result.Name) d.Set("description", result.Description) d.Set("storage", result.Properties[0]) - d.Set("snapshot", result.Snapshot) - d.Set("snapshot_id", result.SnapshotID) - d.Set("snapshot_account", result.SnapshotAccount) size, err := strconv.Atoi(result.Size) if err != nil { return err } d.Set("size", size) + d.Set("bootable", result.Bootable) + d.Set("image_list", result.ImageList) + d.Set("image_list_entry", result.ImageListEntry) if err := setStringList(d, "tags", result.Tags); err != nil { return err } - flattenOPCStorageVolumeOptionalFields(d, result) - flattenOPCStorageVolumeComputedFields(d, result) return nil @@ -255,24 +236,6 @@ func resourceOPCStorageVolumeDelete(d *schema.ResourceData, meta interface{}) er return nil } -func expandOPCStorageVolumeOptionalFields(d *schema.ResourceData, input *compute.CreateStorageVolumeInput) { - bootValue, bootExists := d.GetOk("bootable") - input.Bootable = bootExists - if bootExists { - configs := bootValue.([]interface{}) - config := configs[0].(map[string]interface{}) - - input.ImageList = config["image_list"].(string) - input.ImageListEntry = config["image_list_entry"].(int) - } -} - -func flattenOPCStorageVolumeOptionalFields(d *schema.ResourceData, result *compute.StorageVolumeInfo) { - d.Set("bootable", result.Bootable) - d.Set("image_list", result.ImageList) - d.Set("image_list_entry", result.ImageListEntry) -} - func flattenOPCStorageVolumeComputedFields(d *schema.ResourceData, result *compute.StorageVolumeInfo) { d.Set("hypervisor", result.Hypervisor) d.Set("machine_image", result.MachineImage) diff --git a/builtin/providers/opc/resource_storage_volume_test.go b/builtin/providers/opc/resource_storage_volume_test.go index fc157ef94..122380f16 100644 --- a/builtin/providers/opc/resource_storage_volume_test.go +++ b/builtin/providers/opc/resource_storage_volume_test.go @@ -116,9 +116,10 @@ func TestAccOPCStorageVolume_Bootable(t *testing.T) { }) } -func TestAccOPCStorageVolume_FromSnapshot(t *testing.T) { +func TestAccOPCStorageVolume_ImageListEntry(t *testing.T) { volumeResourceName := "opc_compute_storage_volume.test" - rInt := acctest.RandInt() + ri := acctest.RandInt() + config := fmt.Sprintf(testAccStorageVolumeImageListEntry, ri, ri) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -126,13 +127,9 @@ func TestAccOPCStorageVolume_FromSnapshot(t *testing.T) { CheckDestroy: opcResourceCheck(volumeResourceName, testAccCheckStorageVolumeDestroyed), Steps: []resource.TestStep{ { - Config: testAccStorageVolumeFromSnapshot(rInt), + Config: config, Check: resource.ComposeTestCheckFunc( opcResourceCheck(volumeResourceName, testAccCheckStorageVolumeExists), - resource.TestCheckResourceAttr(volumeResourceName, "name", fmt.Sprintf("test-acc-stor-vol-final-%d", rInt)), - resource.TestCheckResourceAttrSet(volumeResourceName, "snapshot"), - 
resource.TestCheckResourceAttrSet(volumeResourceName, "snapshot_id"), - resource.TestCheckResourceAttr(volumeResourceName, "size", "5"), ), }, }, @@ -206,17 +203,44 @@ resource "opc_compute_storage_volume" "test" { const testAccStorageVolumeBootable = ` resource "opc_compute_image_list" "test" { name = "test-acc-stor-vol-bootable-image-list-%d" - description = "Provider Acceptance Tests Storage Volume" + description = "Provider Acceptance Tests Storage Volume Bootable" +} + +resource "opc_compute_image_list_entry" "test" { + name = "${opc_compute_image_list.test.name}" + machine_images = [ "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" ] + version = 1 } resource "opc_compute_storage_volume" "test" { - name = "test-acc-stor-vol-bootable-%d" - description = "Provider Acceptance Tests Storage Volume" - size = 2 - tags = ["bar", "foo"] - bootable { - image_list = "${opc_compute_image_list.test.name}" - } + name = "test-acc-stor-vol-bootable-%d" + description = "Provider Acceptance Tests Storage Volume Bootable" + size = 20 + tags = ["bar", "foo"] + bootable = true + image_list = "${opc_compute_image_list.test.name}" + image_list_entry = "${opc_compute_image_list_entry.test.version}" +} +` + +const testAccStorageVolumeImageListEntry = ` +resource "opc_compute_image_list" "test" { + name = "test-acc-stor-vol-bootable-image-list-%d" + description = "Provider Acceptance Tests Storage Volume Image List Entry" +} + +resource "opc_compute_image_list_entry" "test" { + name = "${opc_compute_image_list.test.name}" + machine_images = [ "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" ] + version = 1 +} + +resource "opc_compute_storage_volume" "test" { + name = "test-acc-stor-vol-bootable-%d" + description = "Provider Acceptance Tests Storage Volume Image List Entry" + size = 20 + tags = ["bar", "foo"] + image_list_entry = "${opc_compute_image_list_entry.test.version}" } ` @@ -227,29 +251,3 @@ resource "opc_compute_storage_volume" "test" { size = 2048 } ` - -func testAccStorageVolumeFromSnapshot(rInt int) string { - return fmt.Sprintf(` -// Initial Storage Volume to create snapshot with -resource "opc_compute_storage_volume" "foo" { - name = "test-acc-stor-vol-%d" - description = "Acc Test intermediary storage volume for snapshot" - size = 5 -} - -resource "opc_compute_storage_volume_snapshot" "foo" { - description = "testing-acc" - name = "test-acc-stor-snapshot-%d" - collocated = true - volume_name = "${opc_compute_storage_volume.foo.name}" -} - -// Create storage volume from snapshot -resource "opc_compute_storage_volume" "test" { - name = "test-acc-stor-vol-final-%d" - description = "storage volume from snapshot" - size = 5 - snapshot_id = "${opc_compute_storage_volume_snapshot.foo.snapshot_id}" -} -`, rInt, rInt, rInt) -} From 222a03da547bd83b69a74832dd533b281f3f34ad Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 6 Apr 2017 19:12:38 +0100 Subject: [PATCH 037/342] Updating the documentation --- .../opc_compute_storage_volume.html.markdown | 31 ++++++++++--------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown index c4a682f41..e1dd3d13b 100644 --- a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown @@ -30,14 +30,20 @@ resource "opc_compute_image_list" "test" { description = "Description for the Image List" } 
+resource "opc_compute_image_list_entry" "test" { + name = "${opc_compute_image_list.test.name}" + machine_images = [ "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" ] + version = 1 +} + resource "opc_compute_storage_volume" "test" { - name = "storageVolume1" - description = "Description for the Bootable Storage Volume" - size = 30 - tags = ["first", "second"] - bootable { - image_list = "${opc_compute_image_list.test.name}" - } + name = "storageVolume1" + description = "Description for the Bootable Storage Volume" + size = 30 + tags = ["first", "second"] + bootable = true + image_list = "${opc_compute_image_list.test.name}" + image_list_entry = "${opc_compute_image_list_entry.test.version}" } ``` @@ -49,15 +55,10 @@ The following arguments are supported: * `description` (Optional) The description of the storage volume. * `size` (Required) The size of this storage volume in GB. The allowed range is from 1 GB to 2 TB (2048 GB). * `storage_type` - (Optional) - The Type of Storage to provision. Possible values are `/oracle/public/storage/latency` or `/oracle/public/storage/default`. Defaults to `/oracle/public/storage/default`. -* `bootable` - (Optional) A `bootable` block as defined below. +* `bootable` - (Optional) Is the Volume Bootable? Defaults to `false`. +* `image_list` - (Required) Defines an image list. Required if `bootable` is set to `true`. +* `image_list_entry` - (Optional) Defines an image list entry. Required if `bootable` is set to `true`. * `tags` - (Optional) Comma-separated strings that tag the storage volume. -* `snapshot` - (Optional) Name of the storage volume snapshot if this storage volume is a clone. -* `snapshot_account` - (Optional) Account of the parent snapshot from which the storage volume is restored. -* `snapshot_id` - (Optional) Id of the parent snapshot from which the storage volume is restored or cloned. - -`bootable` supports the following: -* `image_list` - (Required) Defines an image list. -* `image_list_entry` - (Optional) Defines an image list entry. 
## Attributes Reference From d881535fb98107002f9e5e19ed2a8c2304fa76a8 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 6 Apr 2017 19:12:48 +0100 Subject: [PATCH 038/342] GoVendor sync --- vendor/vendor.json | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/vendor/vendor.json b/vendor/vendor.json index 71ab6e7e7..202201f2b 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1976,16 +1976,14 @@ "revisionTime": "2017-04-06T17:51:51Z" }, { - "checksumSHA1": "DzK7lYwHt5Isq5Zf73cnQqBO2LI=", "path": "github.com/hashicorp/go-oracle-terraform/helper", - "revision": "98fdaf3c4bde245e21947487ba722c3d0abaccb2", - "revisionTime": "2017-03-29T21:19:34Z" + "revision": "5508daed82ecd55b71d45e8a149e99d24825e5bb", + "revisionTime": "2017-04-06T17:51:51Z" }, { - "checksumSHA1": "AyNRs19Es9pDw2VMxVKWuLx3Afg=", "path": "github.com/hashicorp/go-oracle-terraform/opc", - "revision": "98fdaf3c4bde245e21947487ba722c3d0abaccb2", - "revisionTime": "2017-03-29T21:19:34Z" + "revision": "5508daed82ecd55b71d45e8a149e99d24825e5bb", + "revisionTime": "2017-04-06T17:51:51Z" }, { "checksumSHA1": "b0nQutPMJHeUmz4SjpreotAo6Yk=", From 41bf29aa6e96cf8f470603ef0720d1b5a1563062 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 6 Apr 2017 19:38:21 +0100 Subject: [PATCH 039/342] re-govendor syncing --- vendor/vendor.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vendor/vendor.json b/vendor/vendor.json index 202201f2b..83c6643f9 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1976,11 +1976,13 @@ "revisionTime": "2017-04-06T17:51:51Z" }, { + "checksumSHA1": "DzK7lYwHt5Isq5Zf73cnQqBO2LI=", "path": "github.com/hashicorp/go-oracle-terraform/helper", "revision": "5508daed82ecd55b71d45e8a149e99d24825e5bb", "revisionTime": "2017-04-06T17:51:51Z" }, { + "checksumSHA1": "AyNRs19Es9pDw2VMxVKWuLx3Afg=", "path": "github.com/hashicorp/go-oracle-terraform/opc", "revision": "5508daed82ecd55b71d45e8a149e99d24825e5bb", "revisionTime": "2017-04-06T17:51:51Z" From c4357c29c6aa48d62ed019bed8b3e5c968433ea4 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 09:05:28 +0100 Subject: [PATCH 040/342] Fixing the documentation --- .../opc/r/opc_compute_storage_volume.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown index e1dd3d13b..befd2301e 100644 --- a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown @@ -42,7 +42,7 @@ resource "opc_compute_storage_volume" "test" { size = 30 tags = ["first", "second"] bootable = true - image_list = "${opc_compute_image_list.test.name}" + image_list = "${opc_compute_image_list.test.name}" image_list_entry = "${opc_compute_image_list_entry.test.version}" } ``` @@ -56,8 +56,8 @@ The following arguments are supported: * `size` (Required) The size of this storage volume in GB. The allowed range is from 1 GB to 2 TB (2048 GB). * `storage_type` - (Optional) - The Type of Storage to provision. Possible values are `/oracle/public/storage/latency` or `/oracle/public/storage/default`. Defaults to `/oracle/public/storage/default`. * `bootable` - (Optional) Is the Volume Bootable? Defaults to `false`. -* `image_list` - (Required) Defines an image list. Required if `bootable` is set to `true`. 
-* `image_list_entry` - (Optional) Defines an image list entry. Required if `bootable` is set to `true`. +* `image_list` - (Optional) Defines an image list. Required if `bootable` is set to `true`, optional if set to `false`. +* `image_list_entry` - (Optional) Defines an image list entry. Required if `bootable` is set to `true`, optional if set to `false`. * `tags` - (Optional) Comma-separated strings that tag the storage volume. ## Attributes Reference From 41595582dcce335a8965122e4963a54fa88d7ad6 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 09:23:03 +0100 Subject: [PATCH 041/342] adding back in snapshots --- .../providers/opc/resource_storage_volume.go | 34 +++++++++++++ .../opc/resource_storage_volume_test.go | 49 +++++++++++++++++++ 2 files changed, 83 insertions(+) diff --git a/builtin/providers/opc/resource_storage_volume.go b/builtin/providers/opc/resource_storage_volume.go index 0d101f649..901404366 100644 --- a/builtin/providers/opc/resource_storage_volume.go +++ b/builtin/providers/opc/resource_storage_volume.go @@ -42,6 +42,26 @@ func resourceOPCStorageVolume() *schema.Resource { }, true), }, + "snapshot": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "snapshot_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "bootable": { Type: schema.TypeBool, Optional: true, @@ -141,6 +161,16 @@ func resourceOPCStorageVolumeCreate(d *schema.ResourceData, meta interface{}) er Tags: getStringList(d, "tags"), } + if v, ok := d.GetOk("snapshot"); ok { + input.Snapshot = v.(string) + } + if v, ok := d.GetOk("snapshot_account"); ok { + input.SnapshotAccount = v.(string) + } + if v, ok := d.GetOk("snapshot_id"); ok { + input.SnapshotID = v.(string) + } + info, err := client.CreateStorageVolume(&input) if err != nil { return fmt.Errorf("Error creating storage volume %s: %s", name, err) @@ -212,6 +242,10 @@ func resourceOPCStorageVolumeRead(d *schema.ResourceData, meta interface{}) erro d.Set("image_list", result.ImageList) d.Set("image_list_entry", result.ImageListEntry) + d.Set("snapshot", result.Snapshot) + d.Set("snapshot_id", result.SnapshotID) + d.Set("snapshot_account", result.SnapshotAccount) + if err := setStringList(d, "tags", result.Tags); err != nil { return err } diff --git a/builtin/providers/opc/resource_storage_volume_test.go b/builtin/providers/opc/resource_storage_volume_test.go index 122380f16..40a56ecde 100644 --- a/builtin/providers/opc/resource_storage_volume_test.go +++ b/builtin/providers/opc/resource_storage_volume_test.go @@ -136,6 +136,30 @@ func TestAccOPCStorageVolume_ImageListEntry(t *testing.T) { }) } +func TestAccOPCStorageVolume_FromSnapshot(t *testing.T) { + volumeResourceName := "opc_compute_storage_volume.test" + rInt := acctest.RandInt() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(volumeResourceName, testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + { + Config: testAccStorageVolumeFromSnapshot(rInt), + Check: resource.ComposeTestCheckFunc( + opcResourceCheck(volumeResourceName, testAccCheckStorageVolumeExists), + resource.TestCheckResourceAttr(volumeResourceName, "name", fmt.Sprintf("test-acc-stor-vol-final-%d", rInt)), + resource.TestCheckResourceAttrSet(volumeResourceName, "snapshot"), + 
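+					// "snapshot" is a Computed attribute: the config this test uses
+					// (testAccStorageVolumeFromSnapshot, below) only sets snapshot_id,
+					// so this check confirms that Read back-fills the snapshot name
+					// from the API once the clone is created.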
resource.TestCheckResourceAttrSet(volumeResourceName, "snapshot_id"), + resource.TestCheckResourceAttr(volumeResourceName, "size", "5"), + ), + }, + }, + }) +} + func testAccCheckStorageVolumeExists(state *OPCResourceState) error { sv := state.Client.StorageVolumes() volumeName := state.Attributes["name"] @@ -251,3 +275,28 @@ resource "opc_compute_storage_volume" "test" { size = 2048 } ` + +func testAccStorageVolumeFromSnapshot(rInt int) string { + return fmt.Sprintf(` + // Initial Storage Volume to create snapshot with + resource "opc_compute_storage_volume" "foo" { + name = "test-acc-stor-vol-%d" + description = "Acc Test intermediary storage volume for snapshot" + size = 5 + } + + resource "opc_compute_storage_volume_snapshot" "foo" { + description = "testing-acc" + name = "test-acc-stor-snapshot-%d" + collocated = true + volume_name = "${opc_compute_storage_volume.foo.name}" + } + + // Create storage volume from snapshot + resource "opc_compute_storage_volume" "test" { + name = "test-acc-stor-vol-final-%d" + description = "storage volume from snapshot" + size = 5 + snapshot_id = "${opc_compute_storage_volume_snapshot.foo.snapshot_id}" + }`, rInt, rInt, rInt) +} From 6d859479ddea886eda75c822b6453c411663970d Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 10:36:04 +0100 Subject: [PATCH 042/342] Import support for Storage Volumes --- .../opc/import_storage_volume_test.go | 141 ++++++++++++++++++ .../providers/opc/resource_storage_volume.go | 5 +- .../opc/resource_storage_volume_test.go | 18 ++- 3 files changed, 155 insertions(+), 9 deletions(-) create mode 100644 builtin/providers/opc/import_storage_volume_test.go diff --git a/builtin/providers/opc/import_storage_volume_test.go b/builtin/providers/opc/import_storage_volume_test.go new file mode 100644 index 000000000..f6af66226 --- /dev/null +++ b/builtin/providers/opc/import_storage_volume_test.go @@ -0,0 +1,141 @@ +package opc + +import ( + "testing" + + "fmt" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCStorageVolume_importBasic(t *testing.T) { + resourceName := "opc_compute_storage_volume.test" + rInt := acctest.RandInt() + config := fmt.Sprintf(testAccStorageVolumeBasic, rInt) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(resourceName, testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccOPCStorageVolume_importComplete(t *testing.T) { + resourceName := "opc_compute_storage_volume.test" + rInt := acctest.RandInt() + config := fmt.Sprintf(testAccStorageVolumeComplete, rInt) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(resourceName, testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccOPCStorageVolume_importMaxSize(t *testing.T) { + resourceName := "opc_compute_storage_volume.test" + rInt := acctest.RandInt() + config := fmt.Sprintf(testAccStorageVolumeBasicMaxSize, rInt) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(resourceName, 
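+		// Reuses the destroy checker defined in resource_storage_volume_test.go;
+		// the import tests live in the same opc test package, so no export is needed.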
testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccOPCStorageVolume_importBootable(t *testing.T) { + resourceName := "opc_compute_storage_volume.test" + rInt := acctest.RandInt() + config := fmt.Sprintf(testAccStorageVolumeBootable, rInt, rInt) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(resourceName, testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccOPCStorageVolume_importImageListEntry(t *testing.T) { + resourceName := "opc_compute_storage_volume.test" + rInt := acctest.RandInt() + config := fmt.Sprintf(testAccStorageVolumeBootable, rInt, rInt) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(resourceName, testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccOPCStorageVolume_importFromSnapshot(t *testing.T) { + resourceName := "opc_compute_storage_volume.test" + rInt := acctest.RandInt() + config := testAccStorageVolumeFromSnapshot(rInt) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(resourceName, testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/resource_storage_volume.go b/builtin/providers/opc/resource_storage_volume.go index 901404366..72a2b969c 100644 --- a/builtin/providers/opc/resource_storage_volume.go +++ b/builtin/providers/opc/resource_storage_volume.go @@ -15,6 +15,9 @@ func resourceOPCStorageVolume() *schema.Resource { Read: resourceOPCStorageVolumeRead, Update: resourceOPCStorageVolumeUpdate, Delete: resourceOPCStorageVolumeDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -232,7 +235,7 @@ func resourceOPCStorageVolumeRead(d *schema.ResourceData, meta interface{}) erro d.Set("name", result.Name) d.Set("description", result.Description) - d.Set("storage", result.Properties[0]) + d.Set("storage_type", result.Properties[0]) size, err := strconv.Atoi(result.Size) if err != nil { return err diff --git a/builtin/providers/opc/resource_storage_volume_test.go b/builtin/providers/opc/resource_storage_volume_test.go index 40a56ecde..a9baefced 100644 --- a/builtin/providers/opc/resource_storage_volume_test.go +++ b/builtin/providers/opc/resource_storage_volume_test.go @@ -160,6 +160,8 @@ func TestAccOPCStorageVolume_FromSnapshot(t *testing.T) { }) } +// TODO: test Premium storage + func testAccCheckStorageVolumeExists(state *OPCResourceState) error { sv := state.Client.StorageVolumes() volumeName := state.Attributes["name"] @@ -270,9 +272,9 @@ resource "opc_compute_storage_volume" "test" { const testAccStorageVolumeBasicMaxSize = ` resource "opc_compute_storage_volume" "test" { - name = "test-acc-stor-vol-%d" + name = "test-acc-stor-vol-%d" description = 
"Provider Acceptance Tests Storage Volume Max Size" - size = 2048 + size = 2048 } ` @@ -280,23 +282,23 @@ func testAccStorageVolumeFromSnapshot(rInt int) string { return fmt.Sprintf(` // Initial Storage Volume to create snapshot with resource "opc_compute_storage_volume" "foo" { - name = "test-acc-stor-vol-%d" + name = "test-acc-stor-vol-%d" description = "Acc Test intermediary storage volume for snapshot" - size = 5 + size = 5 } resource "opc_compute_storage_volume_snapshot" "foo" { description = "testing-acc" - name = "test-acc-stor-snapshot-%d" - collocated = true + name = "test-acc-stor-snapshot-%d" + collocated = true volume_name = "${opc_compute_storage_volume.foo.name}" } // Create storage volume from snapshot resource "opc_compute_storage_volume" "test" { - name = "test-acc-stor-vol-final-%d" + name = "test-acc-stor-vol-final-%d" description = "storage volume from snapshot" - size = 5 + size = 5 snapshot_id = "${opc_compute_storage_volume_snapshot.foo.snapshot_id}" }`, rInt, rInt, rInt) } From 41d4bc16224179ad7caf5eeedb3be8348ce6dc76 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 10:42:27 +0100 Subject: [PATCH 043/342] Adding tests covering low latency storage --- .../opc/import_storage_volume_test.go | 22 +++++++++++++ .../opc/resource_storage_volume_test.go | 33 +++++++++++++++++-- 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/builtin/providers/opc/import_storage_volume_test.go b/builtin/providers/opc/import_storage_volume_test.go index f6af66226..fdd871462 100644 --- a/builtin/providers/opc/import_storage_volume_test.go +++ b/builtin/providers/opc/import_storage_volume_test.go @@ -118,6 +118,28 @@ func TestAccOPCStorageVolume_importImageListEntry(t *testing.T) { }) } +func TestAccOPCStorageVolume_importLowLatency(t *testing.T) { + resourceName := "opc_compute_storage_volume.test" + rInt := acctest.RandInt() + config := testAccStorageVolumeLowLatency(rInt) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(resourceName, testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccOPCStorageVolume_importFromSnapshot(t *testing.T) { resourceName := "opc_compute_storage_volume.test" rInt := acctest.RandInt() diff --git a/builtin/providers/opc/resource_storage_volume_test.go b/builtin/providers/opc/resource_storage_volume_test.go index a9baefced..cf2209802 100644 --- a/builtin/providers/opc/resource_storage_volume_test.go +++ b/builtin/providers/opc/resource_storage_volume_test.go @@ -136,6 +136,27 @@ func TestAccOPCStorageVolume_ImageListEntry(t *testing.T) { }) } +func TestAccOPCStorageVolume_LowLatency(t *testing.T) { + volumeResourceName := "opc_compute_storage_volume.test" + rInt := acctest.RandInt() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(volumeResourceName, testAccCheckStorageVolumeDestroyed), + Steps: []resource.TestStep{ + { + Config: testAccStorageVolumeLowLatency(rInt), + Check: resource.ComposeTestCheckFunc( + opcResourceCheck(volumeResourceName, testAccCheckStorageVolumeExists), + resource.TestCheckResourceAttr(volumeResourceName, "storage_type", "/oracle/public/storage/latency"), + ), + }, + }, + }) +} + func TestAccOPCStorageVolume_FromSnapshot(t *testing.T) { volumeResourceName := 
"opc_compute_storage_volume.test" rInt := acctest.RandInt() @@ -160,8 +181,6 @@ func TestAccOPCStorageVolume_FromSnapshot(t *testing.T) { }) } -// TODO: test Premium storage - func testAccCheckStorageVolumeExists(state *OPCResourceState) error { sv := state.Client.StorageVolumes() volumeName := state.Attributes["name"] @@ -302,3 +321,13 @@ func testAccStorageVolumeFromSnapshot(rInt int) string { snapshot_id = "${opc_compute_storage_volume_snapshot.foo.snapshot_id}" }`, rInt, rInt, rInt) } + +func testAccStorageVolumeLowLatency(rInt int) string { + return fmt.Sprintf(` + resource "opc_compute_storage_volume" "test" { + name = "test-acc-stor-vol-ll-%d" + description = "Acc Test Storage Volume Low Latency" + storage_type = "/oracle/public/storage/latency" + size = 5 + }`, rInt) +} From 18dc5559badb76b3727410445476e13db84f8eb7 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 11:06:51 +0100 Subject: [PATCH 044/342] Import support for Image Lists --- .../providers/opc/import_ip_network_test.go | 33 +++++++++++++++++++ builtin/providers/opc/resource_ip_network.go | 3 ++ .../providers/opc/resource_ip_network_test.go | 30 +++++++++++++++-- 3 files changed, 63 insertions(+), 3 deletions(-) create mode 100644 builtin/providers/opc/import_ip_network_test.go diff --git a/builtin/providers/opc/import_ip_network_test.go b/builtin/providers/opc/import_ip_network_test.go new file mode 100644 index 000000000..6311e98d4 --- /dev/null +++ b/builtin/providers/opc/import_ip_network_test.go @@ -0,0 +1,33 @@ +package opc + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOPCIPNetwork_importBasic(t *testing.T) { + resourceName := "opc_compute_ip_network.test" + + rInt := acctest.RandInt() + config := testAccOPCIPNetworkConfig_Basic(rInt) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(resourceName, testAccOPCCheckIPNetworkDestroyed), + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/opc/resource_ip_network.go b/builtin/providers/opc/resource_ip_network.go index c7281f2fe..ddb0e65c3 100644 --- a/builtin/providers/opc/resource_ip_network.go +++ b/builtin/providers/opc/resource_ip_network.go @@ -13,6 +13,9 @@ func resourceOPCIPNetwork() *schema.Resource { Read: resourceOPCIPNetworkRead, Update: resourceOPCIPNetworkUpdate, Delete: resourceOPCIPNetworkDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { diff --git a/builtin/providers/opc/resource_ip_network_test.go b/builtin/providers/opc/resource_ip_network_test.go index 3f3ffce6e..baf3ed339 100644 --- a/builtin/providers/opc/resource_ip_network_test.go +++ b/builtin/providers/opc/resource_ip_network_test.go @@ -12,7 +12,31 @@ import ( func TestAccOPCIPNetwork_Basic(t *testing.T) { rInt := acctest.RandInt() - resName := "opc_compute_ip_network.foo" + resName := "opc_compute_ip_network.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: opcResourceCheck(resName, testAccOPCCheckIPNetworkDestroyed), + Steps: []resource.TestStep{ + { + Config: testAccOPCIPNetworkConfig_Basic(rInt), + Check: resource.ComposeTestCheckFunc( + opcResourceCheck(resName, 
testAccOPCCheckIPNetworkExists), + resource.TestCheckResourceAttr(resName, "ip_address_prefix", "10.0.12.0/24"), + resource.TestCheckResourceAttr(resName, "public_napt_enabled", "false"), + resource.TestCheckResourceAttr(resName, "description", fmt.Sprintf("testing-desc-%d", rInt)), + resource.TestCheckResourceAttr(resName, "name", fmt.Sprintf("testing-ip-network-%d", rInt)), + resource.TestMatchResourceAttr(resName, "uri", regexp.MustCompile("testing-ip-network")), + ), + }, + }, + }) +} + +func TestAccOPCIPNetwork_Update(t *testing.T) { + rInt := acctest.RandInt() + resName := "opc_compute_ip_network.test" resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -46,7 +70,7 @@ func TestAccOPCIPNetwork_Basic(t *testing.T) { func testAccOPCIPNetworkConfig_Basic(rInt int) string { return fmt.Sprintf(` -resource "opc_compute_ip_network" "foo" { +resource "opc_compute_ip_network" "test" { name = "testing-ip-network-%d" description = "testing-desc-%d" ip_address_prefix = "10.0.12.0/24" @@ -55,7 +79,7 @@ resource "opc_compute_ip_network" "foo" { func testAccOPCIPNetworkConfig_BasicUpdate(rInt int) string { return fmt.Sprintf(` -resource "opc_compute_ip_network" "foo" { +resource "opc_compute_ip_network" "test" { name = "testing-ip-network-%d" description = "testing-desc-%d" ip_address_prefix = "10.0.12.0/24" From bfd6aab40f7f13a93ebdc41161ca883f5238522b Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 11:41:52 +0100 Subject: [PATCH 045/342] Updating the opc layout --- website/source/layouts/opc.erb | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/source/layouts/opc.erb b/website/source/layouts/opc.erb index 706acbbb5..0f086fff9 100644 --- a/website/source/layouts/opc.erb +++ b/website/source/layouts/opc.erb @@ -3,14 +3,15 @@ -<% end %> -<%= yield %> + <% end %> + + <%= yield %> <% end %> From 965d6ce92e6f9ffd08aca6f7ca5f53fa2bb0cf02 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 11:57:52 +0100 Subject: [PATCH 046/342] Updating the spacing --- ...pc_compute_network_interface.html.markdown | 6 +- .../opc/r/opc_compute_acl.html.markdown | 6 +- ...opc_compute_image_list_entry.html.markdown | 6 +- .../opc/r/opc_compute_instance.html.markdown | 58 +++++++++---------- ...mpute_ip_address_association.html.markdown | 8 +-- ...ompute_ip_address_prefix_set.html.markdown | 6 +- ...mpute_ip_address_reservation.html.markdown | 4 +- .../opc_compute_ip_association.html.markdown | 4 +- .../r/opc_compute_ip_network.html.markdown | 12 ++-- ..._compute_ip_network_exchange.html.markdown | 2 +- .../opc_compute_ip_reservation.html.markdown | 6 +- .../opc/r/opc_compute_route.html.markdown | 12 ++-- .../opc/r/opc_compute_sec_rule.html.markdown | 10 ++-- ...compute_security_application.html.markdown | 14 ++--- ...opc_compute_security_ip_list.html.markdown | 4 +- ...pc_compute_security_protocol.html.markdown | 8 +-- .../r/opc_compute_security_rule.html.markdown | 8 +-- .../opc/r/opc_compute_ssh_key.html.markdown | 6 +- .../opc_compute_storage_volume.html.markdown | 2 +- ...pute_storage_volume_snapshot.html.markdown | 2 +- .../opc/r/opc_compute_vnic_set.html.markdown | 10 ++-- 21 files changed, 97 insertions(+), 97 deletions(-) diff --git a/website/source/docs/providers/opc/d/opc_compute_network_interface.html.markdown b/website/source/docs/providers/opc/d/opc_compute_network_interface.html.markdown index b479baeba..ab924e5d4 100644 --- a/website/source/docs/providers/opc/d/opc_compute_network_interface.html.markdown +++ 
b/website/source/docs/providers/opc/d/opc_compute_network_interface.html.markdown @@ -14,9 +14,9 @@ Use this data source to access the configuration of an instance's network interf ``` data "opc_compute_network_interface" "foo" { - instance_id = "${opc_compute_instance.my_instance.id}" + instance_id = "${opc_compute_instance.my_instance.id}" instance_name = "${opc_compute_instance.my_instance.name}" - interface = "eth0" + interface = "eth0" } output "mac_address" { @@ -40,7 +40,7 @@ output "vnic" { * `ip_network` - The IP Network assigned to the interface. * `mac_address` - The MAC address of the interface. * `model` - The model of the NIC card used. -* `name_servers` - Array of name servers for the interface. +* `name_servers` - Array of name servers for the interface. * `nat` - The IP Reservation (in IP Networks) associated with the interface. * `search_domains` - The search domains that are sent through DHCP as option 119. * `sec_lists` - The security lists the interface is added to. diff --git a/website/source/docs/providers/opc/r/opc_compute_acl.html.markdown b/website/source/docs/providers/opc/r/opc_compute_acl.html.markdown index 6f26c777b..b69aab166 100644 --- a/website/source/docs/providers/opc/r/opc_compute_acl.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_acl.html.markdown @@ -14,9 +14,9 @@ The ``opc_compute_acl`` resource creates and manages an ACL in an OPC identity d ``` resource "opc_compute_acl" "default" { - name = "ACL1" - description = "This is a description for an acl" - tags = ["tag1", "tag2"] + name = "ACL1" + description = "This is a description for an acl" + tags = ["tag1", "tag2"] } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_image_list_entry.html.markdown b/website/source/docs/providers/opc/r/opc_compute_image_list_entry.html.markdown index 79ab48bc6..6ee27c506 100644 --- a/website/source/docs/providers/opc/r/opc_compute_image_list_entry.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_image_list_entry.html.markdown @@ -14,10 +14,10 @@ The ``opc_compute_image_list_entry`` resource creates and manages an Image List ``` resource "opc_compute_image_list_entry" "test" { - name = "imagelist1" + name = "imagelist1" machine_images = ["image1", "image2"] - version = 1 - attributes = </@// ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown index a592c27db..6b63ce55a 100644 --- a/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown @@ -14,10 +14,10 @@ The ``opc_compute_ip_address_association`` resource creates and manages an IP ad ``` resource "opc_compute_ip_address_association" "default" { - name = "PrefixSet1" - ip_address_reservation = "${opc_compute_ip_address_reservation.default.name}" - vnic = "${data.opc_compute_vnic.default.name}" - tags = ["tags1", "tags2"] + name = "PrefixSet1" + ip_address_reservation = "${opc_compute_ip_address_reservation.default.name}" + vnic = "${data.opc_compute_vnic.default.name}" + tags = ["tags1", "tags2"] } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_address_prefix_set.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_address_prefix_set.html.markdown index 3220644d5..b4ff5c949 100644 --- a/website/source/docs/providers/opc/r/opc_compute_ip_address_prefix_set.html.markdown +++ 
b/website/source/docs/providers/opc/r/opc_compute_ip_address_prefix_set.html.markdown @@ -14,9 +14,9 @@ The ``opc_compute_ip_address_prefix_set`` resource creates and manages an IP add ``` resource "opc_compute_ip_address_prefix_set" "default" { - name = "PrefixSet1" - prefixes = ["192.168.0.0/16", "172.120.0.0/24"] - tags = ["tags1", "tags2"] + name = "PrefixSet1" + prefixes = ["192.168.0.0/16", "172.120.0.0/24"] + tags = ["tags1", "tags2"] } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown index 2f95c3523..6a92b8cae 100644 --- a/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown @@ -14,8 +14,8 @@ The ``opc_compute_ip_address_reservation`` resource creates and manages an IP ad ``` resource "opc_compute_ip_address_reservation" "default" { - name = "IPAddressReservation1" - ip_address_pool = "public-ippool" + name = "IPAddressReservation1" + ip_address_pool = "public-ippool" } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown index 0890dbfdb..8a7c073a2 100644 --- a/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown @@ -15,8 +15,8 @@ an OPC identity domain, for the Shared Network. ``` resource "opc_compute_ip_association" "instance1_reservation1" { - vcable = "${opc_compute_instance.test_instance.vcable}" - parentpool = "ipreservation:${opc_compute_ip_reservation.reservation1.name}" + vcable = "${opc_compute_instance.test_instance.vcable}" + parentpool = "ipreservation:${opc_compute_ip_reservation.reservation1.name}" } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_network.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_network.html.markdown index 6ce1977ba..6a8885337 100644 --- a/website/source/docs/providers/opc/r/opc_compute_ip_network.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_ip_network.html.markdown @@ -14,12 +14,12 @@ The ``opc_compute_ip_network`` resource creates and manages an IP Network. 
``` resource "opc_compute_ip_network" "foo" { - name = "my-ip-network" - description = "my IP Network" - ip_address_prefix = "10.0.1.0/24" - ip_network_exchange = "${opc_compute_ip_exchange.foo.name}" - public_napt_enabled = false - tags = ["tag1", "tag2"] + name = "my-ip-network" + description = "my IP Network" + ip_address_prefix = "10.0.1.0/24" + ip_network_exchange = "${opc_compute_ip_exchange.foo.name}" + public_napt_enabled = false + tags = ["tag1", "tag2"] } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_network_exchange.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_network_exchange.html.markdown index 604d95c37..0f07781f6 100644 --- a/website/source/docs/providers/opc/r/opc_compute_ip_network_exchange.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_ip_network_exchange.html.markdown @@ -14,7 +14,7 @@ The ``opc_compute_ip_network_exchange`` resource creates and manages an IP netwo ``` resource "opc_compute_ip_network_exchange" "default" { - name = "NetworkExchange1" + name = "NetworkExchange1" } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown index faf2184aa..67632c92d 100644 --- a/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown @@ -14,9 +14,9 @@ The ``opc_compute_ip_reservation`` resource creates and manages an IP reservatio ``` resource "opc_compute_ip_reservation" "reservation1" { - parent_pool = "/oracle/public/ippool" - permanent = true - tags = [] + parent_pool = "/oracle/public/ippool" + permanent = true + tags = [ "test" ] } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_route.html.markdown b/website/source/docs/providers/opc/r/opc_compute_route.html.markdown index 6fb86c860..f7bfe7e63 100644 --- a/website/source/docs/providers/opc/r/opc_compute_route.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_route.html.markdown @@ -14,12 +14,12 @@ The ``opc_compute_route`` resource creates and manages a route for an IP Network ``` resource "opc_compute_route" "foo" { - name = "my-route" - description = "my IP Network route" - admin_distance = 1 - ip_address_prefix = "10.0.1.0/24" - next_hop_vnic_set = "${opc_compute_vnic_set.bar.name}" - tags = ["tag1", "tag2"] + name = "my-route" + description = "my IP Network route" + admin_distance = 1 + ip_address_prefix = "10.0.1.0/24" + next_hop_vnic_set = "${opc_compute_vnic_set.bar.name}" + tags = ["tag1", "tag2"] } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_sec_rule.html.markdown b/website/source/docs/providers/opc/r/opc_compute_sec_rule.html.markdown index df222ee49..2593ad007 100644 --- a/website/source/docs/providers/opc/r/opc_compute_sec_rule.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_sec_rule.html.markdown @@ -14,11 +14,11 @@ The ``opc_compute_sec_rule`` resource creates and manages a sec rule in an OPC i ``` resource "opc_compute_sec_rule" "test_rule" { - name = "test" - source_list = "seclist:${opc_compute_security_list.sec-list1.name}" - destination_list = "seciplist:${opc_compute_security_ip_list.sec-ip-list1.name}" - action = "permit" - application = "${opc_compute_security_application.spring-boot.name}" + name = "test" + source_list = "seclist:${opc_compute_security_list.sec-list1.name}" + destination_list = 
"seciplist:${opc_compute_security_ip_list.sec-ip-list1.name}" + action = "permit" + application = "${opc_compute_security_application.spring-boot.name}" } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_security_application.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_application.html.markdown index 29e3af03d..0949f9467 100644 --- a/website/source/docs/providers/opc/r/opc_compute_security_application.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_security_application.html.markdown @@ -14,9 +14,9 @@ The ``opc_compute_security_application`` resource creates and manages a security ``` resource "opc_compute_security_application" "tomcat" { - name = "tomcat" - protocol = "tcp" - dport = "8080" + name = "tomcat" + protocol = "tcp" + dport = "8080" } ``` @@ -24,10 +24,10 @@ resource "opc_compute_security_application" "tomcat" { ``` resource "opc_compute_security_application" "tomcat" { - name = "tomcat" - protocol = "icmp" - icmptype = "echo" - icmpcode = "protocol" + name = "tomcat" + protocol = "icmp" + icmptype = "echo" + icmpcode = "protocol" } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_security_ip_list.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_ip_list.html.markdown index 8873d4d49..286ba5f9e 100644 --- a/website/source/docs/providers/opc/r/opc_compute_security_ip_list.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_security_ip_list.html.markdown @@ -14,8 +14,8 @@ The ``opc_compute_security_ip_list`` resource creates and manages a security IP ``` resource "opc_compute_security_ip_list" "sec_ip_list1" { - name = "sec-ip-list1" - ip_entries = ["217.138.34.4"] + name = "sec-ip-list1" + ip_entries = ["217.138.34.4"] } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_security_protocol.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_protocol.html.markdown index 612ab88b9..46449a899 100644 --- a/website/source/docs/providers/opc/r/opc_compute_security_protocol.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_security_protocol.html.markdown @@ -14,10 +14,10 @@ The ``opc_compute_security_protocol`` resource creates and manages a security pr ``` resource "opc_compute_security_protocol" "default" { - name = "security-protocol-1" - dst_ports = ["2045-2050"] - src_ports = ["3045-3060"] - ip_protocol = "tcp" + name = "security-protocol-1" + dst_ports = ["2045-2050"] + src_ports = ["3045-3060"] + ip_protocol = "tcp" } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_security_rule.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_rule.html.markdown index c501517ad..d2462980a 100644 --- a/website/source/docs/providers/opc/r/opc_compute_security_rule.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_security_rule.html.markdown @@ -14,10 +14,10 @@ The ``opc_compute_security_rule`` resource creates and manages a security rule i ``` resource "opc_compute_security_rule" "default" { - name = "SecurityRule1" - flow_direction = "ingress" - acl = "${opc_compute_acl.default.name}" - security_protocols = ["${opc_compute_security_protocol.default.name}"] + name = "SecurityRule1" + flow_direction = "ingress" + acl = "${opc_compute_acl.default.name}" + security_protocols = ["${opc_compute_security_protocol.default.name}"] } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_ssh_key.html.markdown 
b/website/source/docs/providers/opc/r/opc_compute_ssh_key.html.markdown index fd1dcbd9d..2969ebea2 100644 --- a/website/source/docs/providers/opc/r/opc_compute_ssh_key.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_ssh_key.html.markdown @@ -14,9 +14,9 @@ The ``opc_compute_ssh_key`` resource creates and manages an SSH key in an OPC id ``` resource "opc_compute_ssh_key" "%s" { - name = "test-key" - key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIk..." - enabled = true + name = "test-key" + key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIk..." + enabled = true } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown index c4a682f41..518a01033 100644 --- a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown @@ -36,7 +36,7 @@ resource "opc_compute_storage_volume" "test" { size = 30 tags = ["first", "second"] bootable { - image_list = "${opc_compute_image_list.test.name}" + image_list = "${opc_compute_image_list.test.name}" } } ``` diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown index 13ba265a5..7a167080c 100644 --- a/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown @@ -48,7 +48,7 @@ In addition to the attributes above, the following attributes are exported: * `status` - Status of the snapshot. * `status_detail` - Details about the latest state of the storage volume snapshot. * `status_timestamp` - Indicates the time that the current view of the storage volume snapshot was generated. 
-* `uri` - Uniform Resource Identifier +* `uri` - Uniform Resource Identifier ## Import diff --git a/website/source/docs/providers/opc/r/opc_compute_vnic_set.html.markdown b/website/source/docs/providers/opc/r/opc_compute_vnic_set.html.markdown index a0531b948..c5cf9c455 100644 --- a/website/source/docs/providers/opc/r/opc_compute_vnic_set.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_vnic_set.html.markdown @@ -14,11 +14,11 @@ The ``opc_compute_vnic_set`` resource creates and manages a virtual NIC set in a ``` resource "opc_compute_vnic_set" "test_set" { - name = "test_vnic_set" - description = "My vnic set" - applied_acls = ["acl1", "acl2"] - virtual_nics = ["nic1", "nic2", "nic3"] - tags = ["xyzzy", "quux"] + name = "test_vnic_set" + description = "My vnic set" + applied_acls = ["acl1", "acl2"] + virtual_nics = ["nic1", "nic2", "nic3"] + tags = ["xyzzy", "quux"] } ``` From 7e9fbc6b88182ce63db81a6e02daca3434731e0e Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 12:09:31 +0100 Subject: [PATCH 047/342] Updating the example for image list entries --- .../opc/r/opc_compute_image_list_entry.html.markdown | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/opc/r/opc_compute_image_list_entry.html.markdown b/website/source/docs/providers/opc/r/opc_compute_image_list_entry.html.markdown index 6ee27c506..b062773e8 100644 --- a/website/source/docs/providers/opc/r/opc_compute_image_list_entry.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_image_list_entry.html.markdown @@ -13,9 +13,15 @@ The ``opc_compute_image_list_entry`` resource creates and manages an Image List ## Example Usage ``` +resource "opc_compute_image_list" "test" { + name = "imagelist1" + description = "This is a description of the Image List" + default = 21 +} + resource "opc_compute_image_list_entry" "test" { - name = "imagelist1" - machine_images = ["image1", "image2"] + name = "${opc_compute_image_list.test.name}" + machine_images = [ "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" ] version = 1 attributes = < Date: Fri, 7 Apr 2017 12:12:15 +0100 Subject: [PATCH 048/342] Fixing sidebar selection for multiple links --- .../docs/providers/opc/r/opc_compute_image_list.html.markdown | 2 +- .../providers/opc/r/opc_compute_storage_volume.html.markdown | 2 +- website/source/layouts/opc.erb | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/website/source/docs/providers/opc/r/opc_compute_image_list.html.markdown b/website/source/docs/providers/opc/r/opc_compute_image_list.html.markdown index f5899520b..727114c2e 100644 --- a/website/source/docs/providers/opc/r/opc_compute_image_list.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_image_list.html.markdown @@ -1,7 +1,7 @@ --- layout: "opc" page_title: "Oracle: opc_compute_image_list" -sidebar_current: "docs-opc-resource-image-list" +sidebar_current: "docs-opc-resource-image-list-type" description: |- Creates and manages an Image List in an OPC identity domain. 
--- diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown index 518a01033..ccafb7f96 100644 --- a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown @@ -1,7 +1,7 @@ --- layout: "opc" page_title: "Oracle: opc_compute_storage_volume" -sidebar_current: "docs-opc-resource-storage-volume" +sidebar_current: "docs-opc-resource-storage-volume-type" description: |- Creates and manages a storage volume in an OPC identity domain. --- diff --git a/website/source/layouts/opc.erb b/website/source/layouts/opc.erb index 0f086fff9..b85ec7a7d 100644 --- a/website/source/layouts/opc.erb +++ b/website/source/layouts/opc.erb @@ -28,7 +28,7 @@ > opc_compute_acl - > + > opc_compute_image_list > @@ -85,7 +85,7 @@ > opc_compute_ssh_key - > + > opc_compute_storage_volume > From 50cfe7e359cab76e2f0cd66c413d2b2cdc6a1193 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 12:17:36 +0100 Subject: [PATCH 049/342] Updating the instance docs --- .../opc/r/opc_compute_instance.html.markdown | 27 ++++++++++++------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown b/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown index 3f455b6d4..a502d3016 100644 --- a/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown @@ -19,28 +19,35 @@ on your instance resources as an extra safety measure. ## Example Usage ``` -resource "opc_compute_instance" "test_instance" { - name = "test" - label = "test" - shape = "oc3" - imageList = "/oracle/public/oel_6.4_2GB_v1" - sshKeys = ["${opc_compute_ssh_key.key1.name}"] +resource "opc_compute_storage_volume" "test" { + name = "internal" + size = 100 +} + +resource "opc_compute_instance" "test" { + name = "instance1" + label = "Terraform Provisioned Instance" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + + storage { + volume = "${opc_compute_storage_volume.test.name}" + index = 1 + } + networking_info { index = 0 model = "e1000" nat = ["ippool:/oracle/public/ippool"] shared_network = true } + networking_info { index = 1 ip_network = "${opc_compute_ip_network.foo.id}" vnic = "testing-vnic-name" shared_network = false } - storage { - volume = "${opc_compute_storage_volume.foo.name}" - index = 1 - } } ``` From 8a5379479a60d6ce97e7891a9ce69c7b8fca542f Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 13:06:16 +0100 Subject: [PATCH 050/342] Removing the unused ssh key --- builtin/providers/opc/resource_instance_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/builtin/providers/opc/resource_instance_test.go b/builtin/providers/opc/resource_instance_test.go index 2cc61eb39..a29f08c8d 100644 --- a/builtin/providers/opc/resource_instance_test.go +++ b/builtin/providers/opc/resource_instance_test.go @@ -177,8 +177,6 @@ func testAccOPCCheckInstanceDestroy(s *terraform.State) error { return nil } -const validSSHKey = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIkZEr5UcMojtxhk6Zum39NOihHNXEvRWDt5WssX8TH/ghpv3D25K1pJkf+wfAi17HwEmYwPMEyEHENS443v6RZbXvzCkUWzkJzq7Zvbdqld038km31La2QUoMMp1KL5zk1nM65xCeQDVcR/h++03EScB2CuzTpAV6khMdfgOJgxm361kfrDVRwc1HQrAOpOnzkpPfwqBrYWqN1UnKvuO77Wk8z5LBe03EPNru3bLE3s3qHI9hjO0gXMiVUi0KyNxdWfDO8esqQlKavHAeePyrRA55YF8kBB5dEl4tVNOqpY/8TRnGN1mOe0LWxa8Ytz1wbyS49knsNVTel" - func testAccInstanceBasic(rInt int) string { return fmt.Sprintf(` resource "opc_compute_instance" "test" { From 82e57870bb4443cf1099d5712cd0e3a323ea96a6 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 13:07:21 +0100 Subject: [PATCH 051/342] Updating the docs for Instances --- .../opc/r/opc_compute_instance.html.markdown | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown b/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown index a502d3016..881045b42 100644 --- a/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown +++ b/website/source/docs/providers/opc/r/opc_compute_instance.html.markdown @@ -19,36 +19,36 @@ on your instance resources as an extra safety measure. ## Example Usage ``` +resource "opc_compute_ip_network" "test" { + name = "internal-network" + description = "Terraform Provisioned Internal Network" + ip_address_prefix = "10.0.1.0/24" + public_napt_enabled = false +} + resource "opc_compute_storage_volume" "test" { name = "internal" size = 100 } resource "opc_compute_instance" "test" { - name = "instance1" - label = "Terraform Provisioned Instance" - shape = "oc3" - image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" + name = "instance1" + label = "Terraform Provisioned Instance" + shape = "oc3" + image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" - storage { - volume = "${opc_compute_storage_volume.test.name}" - index = 1 - } + storage { + volume = "${opc_compute_storage_volume.test.name}" + index = 1 + } networking_info { index = 0 - model = "e1000" nat = ["ippool:/oracle/public/ippool"] shared_network = true } - - networking_info { - index = 1 - ip_network = "${opc_compute_ip_network.foo.id}" - vnic = "testing-vnic-name" - shared_network = false - } } + ``` ## Argument Reference From 72aab5a13fca4427c8f90fc9b109ec6f2232d125 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 7 Apr 2017 13:14:22 +0100 Subject: [PATCH 052/342] Removing an unneeded line --- website/source/docs/providers/opc/index.html.markdown | 1 - 1 file changed, 1 deletion(-) diff --git a/website/source/docs/providers/opc/index.html.markdown b/website/source/docs/providers/opc/index.html.markdown index 1086f310a..b23d54687 100644 --- a/website/source/docs/providers/opc/index.html.markdown +++ b/website/source/docs/providers/opc/index.html.markdown @@ -45,7 +45,6 @@ The following arguments are supported: * `endpoint` - (Optional) The API endpoint to use, associated with your Oracle Public Cloud account. This is known as the `REST Endpoint` within the Oracle portal. It can also be sourced from the `OPC_ENDPOINT` environment variable. -Max num seconds to wait for successful response when operating on resources within OPC (defaults to 3000) * `max_retry_timeout` - (Optional) The maximum number of seconds to wait for a successful response when operating on resources within Oracle Public Cloud. It can also be sourced from the `OPC_MAX_RETRY_TIMEOUT` environment variable. Defaults to 3000 seconds. 
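`max_retry_timeout` is best read as a deadline on a retry loop rather than a per-request timeout. A rough sketch of the idea, with invented names and a fixed one-second pause — not the actual retry logic in the OPC client:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// retryUntil re-invokes op until it succeeds or maxRetryTimeout elapses.
// Purely illustrative: the real client also backs off between attempts and
// only retries errors it knows to be transient.
func retryUntil(maxRetryTimeout time.Duration, op func() error) error {
	deadline := time.Now().Add(maxRetryTimeout)
	var lastErr error
	for time.Now().Before(deadline) {
		if lastErr = op(); lastErr == nil {
			return nil
		}
		time.Sleep(1 * time.Second)
	}
	return fmt.Errorf("gave up after %s: %v", maxRetryTimeout, lastErr)
}

func main() {
	attempts := 0
	err := retryUntil(5*time.Second, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("resource not ready")
		}
		return nil
	})
	fmt.Printf("attempts=%d err=%v\n", attempts, err)
}
```

Under this reading, raising `max_retry_timeout` only extends how long eventually-consistent operations keep being retried; it does not slow down requests that succeed immediately.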
## Testing From 5c742572e4c9c14c6dbb7cb65695b786f751c8fe Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Fri, 7 Apr 2017 10:41:59 -0400 Subject: [PATCH 053/342] fix data_source_network_interface test --- builtin/providers/opc/data_source_network_interface_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/builtin/providers/opc/data_source_network_interface_test.go b/builtin/providers/opc/data_source_network_interface_test.go index 392748980..7cf65e491 100644 --- a/builtin/providers/opc/data_source_network_interface_test.go +++ b/builtin/providers/opc/data_source_network_interface_test.go @@ -39,7 +39,6 @@ func TestAccOPCDataSourceNetworkInterface_sharedNetwork(t *testing.T) { { Config: testAccDataSourceNetworkInterfaceShared(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resName, "model", "e1000"), resource.TestCheckResourceAttr(resName, "nat.#", "1"), resource.TestCheckResourceAttr(resName, "shared_network", "true"), resource.TestCheckResourceAttr(resName, "sec_lists.#", "1"), @@ -89,7 +88,6 @@ resource "opc_compute_instance" "test" { tags = ["tag1", "tag2"] networking_info { index = 0 - model = "e1000" nat = ["ippool:/oracle/public/ippool"] shared_network = true } From 4b281ba051d8df84e1db2d8569ddaa9b34acbdc6 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Fri, 7 Apr 2017 11:00:26 -0400 Subject: [PATCH 054/342] fixup tests --- builtin/providers/opc/import_instance_test.go | 9 +++++---- builtin/providers/opc/resource_ip_association_test.go | 2 +- .../providers/opc/resource_security_association_test.go | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/builtin/providers/opc/import_instance_test.go b/builtin/providers/opc/import_instance_test.go index e3bbcc32c..f31654443 100644 --- a/builtin/providers/opc/import_instance_test.go +++ b/builtin/providers/opc/import_instance_test.go @@ -25,10 +25,11 @@ func TestAccOPCInstance_importBasic(t *testing.T) { Config: testAccInstanceBasic(rInt), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateIdPrefix: instanceName + "/", + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdPrefix: instanceName + "/", + ImportStateVerifyIgnore: []string{"instance_attributes"}, }, }, }) diff --git a/builtin/providers/opc/resource_ip_association_test.go b/builtin/providers/opc/resource_ip_association_test.go index a00c72473..57ac22afb 100644 --- a/builtin/providers/opc/resource_ip_association_test.go +++ b/builtin/providers/opc/resource_ip_association_test.go @@ -82,7 +82,7 @@ resource "opc_compute_ip_reservation" "test" { } resource "opc_compute_ip_association" "test" { - vcable = "${opc_compute_instance.test.vcable_id}" + vcable = "${opc_compute_instance.test.vcable}" parent_pool = "ipreservation:${opc_compute_ip_reservation.test.name}" } ` diff --git a/builtin/providers/opc/resource_security_association_test.go b/builtin/providers/opc/resource_security_association_test.go index b933406d5..c657494c4 100644 --- a/builtin/providers/opc/resource_security_association_test.go +++ b/builtin/providers/opc/resource_security_association_test.go @@ -122,7 +122,7 @@ resource "opc_compute_instance" "test" { resource "opc_compute_security_association" "test" { name = "acc-test-sec-ass-%d" - vcable = "${opc_compute_instance.test.vcable_id}" + vcable = "${opc_compute_instance.test.vcable}" seclist = "${opc_compute_security_list.test.name}" } ` From 8d7db2cd38055c422133146184b79a358002bf57 Mon Sep 17 00:00:00 2001 From: Jake Champlin 
Date: Fri, 7 Apr 2017 11:31:32 -0400 Subject: [PATCH 055/342] fixup tests --- builtin/providers/opc/resource_security_association_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/opc/resource_security_association_test.go b/builtin/providers/opc/resource_security_association_test.go index c657494c4..811673099 100644 --- a/builtin/providers/opc/resource_security_association_test.go +++ b/builtin/providers/opc/resource_security_association_test.go @@ -101,7 +101,7 @@ resource "opc_compute_instance" "test" { } resource "opc_compute_security_association" "test" { - vcable = "${opc_compute_instance.test.vcable_id}" + vcable = "${opc_compute_instance.test.vcable}" seclist = "${opc_compute_security_list.test.name}" } ` From 2f88ac2e2b8f3e2f41bd6a9dde4e5876e6adb390 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Fri, 7 Apr 2017 12:06:45 -0400 Subject: [PATCH 056/342] Cleanup vet errors --- builtin/providers/opc/resource_image_list_entry_test.go | 2 +- builtin/providers/opc/resource_instance.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/providers/opc/resource_image_list_entry_test.go b/builtin/providers/opc/resource_image_list_entry_test.go index 4d76bd0a6..a74fa9aec 100644 --- a/builtin/providers/opc/resource_image_list_entry_test.go +++ b/builtin/providers/opc/resource_image_list_entry_test.go @@ -97,7 +97,7 @@ func testAccCheckImageListEntryDestroy(s *terraform.State) error { name, version, err := parseOPCImageListEntryID(rs.Primary.ID) if err != nil { - return fmt.Errorf("Error parsing the Image List ID: $+v", err) + return fmt.Errorf("Error parsing the Image List ID: %+v", err) } input := compute.GetImageListEntryInput{ diff --git a/builtin/providers/opc/resource_instance.go b/builtin/providers/opc/resource_instance.go index a2840829e..686ff7b0a 100644 --- a/builtin/providers/opc/resource_instance.go +++ b/builtin/providers/opc/resource_instance.go @@ -405,7 +405,7 @@ func resourceInstanceCreate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error creating instance %s: %s", input.Name, err) } - log.Printf("[DEBUG] Created instance %s: %#v", result.ID) + log.Printf("[DEBUG] Created instance %s: %#v", input.Name, result.ID) d.SetId(result.ID) From bacfefec4211f34221649029b560067addf20a7e Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 10 Apr 2017 12:19:56 +0100 Subject: [PATCH 057/342] Fixing the import list --- builtin/providers/opc/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/opc/config.go b/builtin/providers/opc/config.go index eb4b9612d..b82594cca 100644 --- a/builtin/providers/opc/config.go +++ b/builtin/providers/opc/config.go @@ -6,7 +6,7 @@ import ( "net/url" "strings" - "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-oracle-terraform/compute" "github.com/hashicorp/go-oracle-terraform/opc" "github.com/hashicorp/terraform/helper/logging" From bb2afcb38e6f15d6e335afda8a703a292fdc9639 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 10 Apr 2017 12:22:16 +0100 Subject: [PATCH 058/342] Refactoring the Complete test / identifying Update as Update --- .../opc/import_security_protocol_test.go | 4 +-- .../opc/resource_security_protocol_test.go | 30 +++++++++++++++++-- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/builtin/providers/opc/import_security_protocol_test.go b/builtin/providers/opc/import_security_protocol_test.go 
index da1df1176..109d5d3fe 100644 --- a/builtin/providers/opc/import_security_protocol_test.go +++ b/builtin/providers/opc/import_security_protocol_test.go @@ -32,11 +32,11 @@ func TestAccOPCSecurityProtocol_importBasic(t *testing.T) { }, }) } -func TestAccOPCSecurityProtocol_importDisabled(t *testing.T) { +func TestAccOPCSecurityProtocol_importComplete(t *testing.T) { resourceName := "opc_compute_security_protocol.test" ri := acctest.RandInt() - config := fmt.Sprintf(testAccOPCSecurityProtocolFull, ri) + config := fmt.Sprintf(testAccOPCSecurityProtocolComplete, ri) resource.Test(t, resource.TestCase{ PreCheck: func() { diff --git a/builtin/providers/opc/resource_security_protocol_test.go b/builtin/providers/opc/resource_security_protocol_test.go index 77efeef1e..c1a9c76d4 100644 --- a/builtin/providers/opc/resource_security_protocol_test.go +++ b/builtin/providers/opc/resource_security_protocol_test.go @@ -29,10 +29,34 @@ func TestAccOPCSecurityProtocol_Basic(t *testing.T) { }) } -func TestAccOPCSecurityProtocol_Full(t *testing.T) { +func TestAccOPCSecurityProtocol_Complete(t *testing.T) { protocolResourceName := "opc_compute_security_protocol.test" ri := acctest.RandInt() - config := fmt.Sprintf(testAccOPCSecurityProtocolFull, ri) + config := fmt.Sprintf(testAccOPCSecurityProtocolComplete, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSecurityProtocolDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testAccCheckSecurityProtocolExists, + resource.TestCheckResourceAttr(protocolResourceName, "description", "Terraform Acceptance Test"), + resource.TestCheckResourceAttr(protocolResourceName, "dst_ports.0", "2025-2030"), + resource.TestCheckResourceAttr(protocolResourceName, "src_ports.0", "3025-3030"), + resource.TestCheckResourceAttr(protocolResourceName, "ip_protocol", "tcp"), + ), + }, + }, + }) +} + +func TestAccOPCSecurityProtocol_Update(t *testing.T) { + protocolResourceName := "opc_compute_security_protocol.test" + ri := acctest.RandInt() + config := fmt.Sprintf(testAccOPCSecurityProtocolComplete, ri) config2 := fmt.Sprintf(testAccOPCSecurityProtocolUpdated, ri) resource.Test(t, resource.TestCase{ @@ -109,7 +133,7 @@ resource "opc_compute_security_protocol" "test" { } ` -const testAccOPCSecurityProtocolFull = ` +const testAccOPCSecurityProtocolComplete = ` resource "opc_compute_security_protocol" "test" { name = "acc-security-protocol-%d" description = "Terraform Acceptance Test" From d9eec24e6baabd93cfdf85d8514fdd3b5a368a44 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 10 Apr 2017 12:26:22 +0100 Subject: [PATCH 059/342] Hooking up the Image List Entry tests --- builtin/providers/opc/import_storage_volume_test.go | 2 +- builtin/providers/opc/resource_storage_volume_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/providers/opc/import_storage_volume_test.go b/builtin/providers/opc/import_storage_volume_test.go index fdd871462..f97599fd2 100644 --- a/builtin/providers/opc/import_storage_volume_test.go +++ b/builtin/providers/opc/import_storage_volume_test.go @@ -99,7 +99,7 @@ func TestAccOPCStorageVolume_importBootable(t *testing.T) { func TestAccOPCStorageVolume_importImageListEntry(t *testing.T) { resourceName := "opc_compute_storage_volume.test" rInt := acctest.RandInt() - config := fmt.Sprintf(testAccStorageVolumeBootable, rInt, rInt) + config := 
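+	// Renamed from _Full so that the create, update, and import tests all
+	// reference the same testAccOPCSecurityProtocolComplete fixture.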
fmt.Sprintf(testAccStorageVolumeImageListEntry, rInt, rInt) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/builtin/providers/opc/resource_storage_volume_test.go b/builtin/providers/opc/resource_storage_volume_test.go index cf2209802..5e8d24699 100644 --- a/builtin/providers/opc/resource_storage_volume_test.go +++ b/builtin/providers/opc/resource_storage_volume_test.go @@ -281,7 +281,7 @@ resource "opc_compute_image_list_entry" "test" { } resource "opc_compute_storage_volume" "test" { - name = "test-acc-stor-vol-bootable-%d" + name = "test-acc-stor-vol-image-list-entry-%d" description = "Provider Acceptance Tests Storage Volume Image List Entry" size = 20 tags = ["bar", "foo"] From 02ed0e4b0947ad7eb84010820d85bde74c11f8ce Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 10 Apr 2017 12:28:17 +0100 Subject: [PATCH 060/342] Updating the error message --- builtin/providers/opc/provider_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/opc/provider_test.go b/builtin/providers/opc/provider_test.go index 8c2c842d8..db6dc8a24 100644 --- a/builtin/providers/opc/provider_test.go +++ b/builtin/providers/opc/provider_test.go @@ -23,7 +23,7 @@ func init() { func TestProvider(t *testing.T) { if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) + t.Fatalf("Error creating Provider: %s", err) } } From 40fc19d812cac240b59ffde3505cb0b27f0f53ae Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Tue, 11 Apr 2017 09:57:15 -0400 Subject: [PATCH 061/342] provider/aws: Update instance documentation Updates instance documentation. Fixes: #13514 --- website/source/docs/providers/aws/d/instance.html.markdown | 2 +- website/source/docs/providers/aws/r/instance.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/aws/d/instance.html.markdown b/website/source/docs/providers/aws/d/instance.html.markdown index 65f0a5edc..4ce98426d 100644 --- a/website/source/docs/providers/aws/d/instance.html.markdown +++ b/website/source/docs/providers/aws/d/instance.html.markdown @@ -69,7 +69,7 @@ interpolation. * `device_name` - The physical name of the device. * `no_device` - Whether the specified device included in the device mapping was suppressed or not (Boolean). * `virtual_name` - The virtual device name. -* `iam_instance_profile` - The instance profile associated with the Instance. Specified as an ARN. +* `iam_instance_profile` - The name of the instance profile associated with the Instance. * `instance_type` - The type of the Instance. * `key_name` - The key name of the Instance. * `monitoring` - Whether detailed monitoring is enabled or disabled for the Instance (Boolean). diff --git a/website/source/docs/providers/aws/r/instance.html.markdown b/website/source/docs/providers/aws/r/instance.html.markdown index a71080a87..33ebdc549 100644 --- a/website/source/docs/providers/aws/r/instance.html.markdown +++ b/website/source/docs/providers/aws/r/instance.html.markdown @@ -76,7 +76,7 @@ instances. See [Shutdown Behavior](https://docs.aws.amazon.com/AWSEC2/latest/Use the destination address does not match the instance. Used for NAT or VPNs. Defaults true. * `user_data` - (Optional) The user data to provide when launching the instance. * `iam_instance_profile` - (Optional) The IAM Instance Profile to - launch the instance with. + launch the instance with. Specified as the name of the Instance Profile. 
* `ipv6_address_count` - (Optional) A number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet.
 * `ipv6_addresses` - (Optional) Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface.
 * `tags` - (Optional) A mapping of tags to assign to the resource.

From 6a3ed342fa0e712ff206a6f1c90b66a8519f9da1 Mon Sep 17 00:00:00 2001
From: Jake Champlin
Date: Tue, 11 Apr 2017 10:21:48 -0400
Subject: [PATCH 062/342] provider/vsphere: Add keep_on_remove to docs

Fixes: #13518
---
 .../docs/providers/vsphere/r/virtual_machine.html.markdown | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
index 806ec0b63..d1aa56c5b 100644
--- a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
+++ b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
@@ -121,7 +121,8 @@ The `disk` block supports:
 * `type` - (Optional) 'eager_zeroed' (the default), 'lazy', or 'thin' are supported options.
 * `vmdk` - (Required if template and size not provided) Path to a vmdk in a vSphere datastore.
 * `bootable` - (Optional) Set to 'true' if a vmdk was given and it should attempt to boot after creation.
-* `controller_type` = (Optional) Controller type to attach the disk to. 'scsi' (the default), or 'ide' are supported options.
+* `controller_type` - (Optional) Controller type to attach the disk to. 'scsi' (the default), or 'ide' are supported options.
+* `keep_on_remove` - (Optional) Set to 'true' to not delete a disk on removal.
 
 ## CDROM

From db7fe7fe77d6947671f1d75b2caa281501ed0f6c Mon Sep 17 00:00:00 2001
From: Jake Champlin
Date: Tue, 11 Apr 2017 14:45:10 -0400
Subject: [PATCH 063/342] provider/aws: Fix panic on nil route configs

When creating an `aws_route_table`, if a `route` configuration block
is left `nil`, Terraform would previously panic. This change lets
Terraform catch the faulty interface conversion during the resource
create. The resource will still fail to apply; however, since every
item in the `route` element is `Optional`, we cannot currently catch
this error at plan time via validation.
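For illustration, a minimal configuration of the shape that used to
trigger the panic (resource names here are placeholders; the acceptance
test added below exercises the same case):

```hcl
resource "aws_vpc" "example" {
  cidr_block = "10.2.0.0/16"
}

resource "aws_route_table" "example" {
  vpc_id = "${aws_vpc.example.id}"

  # An empty route block previously caused a panic while hashing the
  # route set; with this change it instead fails at apply time with an
  # EC2 API validation error.
  route {
  }
}
```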
Fixes: #13545 --- .../providers/aws/resource_aws_route_table.go | 5 +++- .../aws/resource_aws_route_table_test.go | 29 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_route_table.go b/builtin/providers/aws/resource_aws_route_table.go index c92dbde16..76ed91381 100644 --- a/builtin/providers/aws/resource_aws_route_table.go +++ b/builtin/providers/aws/resource_aws_route_table.go @@ -452,7 +452,10 @@ func resourceAwsRouteTableDelete(d *schema.ResourceData, meta interface{}) error func resourceAwsRouteTableHash(v interface{}) int { var buf bytes.Buffer - m := v.(map[string]interface{}) + m, castOk := v.(map[string]interface{}) + if !castOk { + return 0 + } if v, ok := m["ipv6_cidr_block"]; ok { buf.WriteString(fmt.Sprintf("%s-", v.(string))) diff --git a/builtin/providers/aws/resource_aws_route_table_test.go b/builtin/providers/aws/resource_aws_route_table_test.go index 68fd9237b..932f33e9d 100644 --- a/builtin/providers/aws/resource_aws_route_table_test.go +++ b/builtin/providers/aws/resource_aws_route_table_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -183,6 +184,21 @@ func TestAccAWSRouteTable_tags(t *testing.T) { }) } +func TestAccAWSRouteTable_panic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_route_table.foo", + Providers: testAccProviders, + CheckDestroy: testAccCheckRouteTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccRouteTableConfigPanic, + ExpectError: regexp.MustCompile("The request must contain the parameter destinationCidrBlock or destinationIpv6CidrBlock"), + }, + }, + }) +} + func testAccCheckRouteTableDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ec2conn @@ -497,3 +513,16 @@ resource "aws_route_table" "foo" { propagating_vgws = ["${aws_vpn_gateway.foo.id}"] } ` + +const testAccRouteTableConfigPanic = ` +resource "aws_vpc" "foo" { + cidr_block = "10.2.0.0/16" +} + +resource "aws_route_table" "foo" { + vpc_id = "${aws_vpc.foo.id}" + + route { + } +} +` From 2e19cf5ad3b01fd236d05d74919c7ac6cec14e2f Mon Sep 17 00:00:00 2001 From: = Date: Tue, 11 Apr 2017 15:10:13 -0600 Subject: [PATCH 064/342] Fixes TestAccTritonMachine_addNIC and TestAccTritonMachine_nic --- .../providers/triton/resource_machine_test.go | 39 ++++++++++--------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/builtin/providers/triton/resource_machine_test.go b/builtin/providers/triton/resource_machine_test.go index 92152fcdc..d6ac9168f 100644 --- a/builtin/providers/triton/resource_machine_test.go +++ b/builtin/providers/triton/resource_machine_test.go @@ -64,7 +64,7 @@ func TestAccTritonMachine_dns(t *testing.T) { func TestAccTritonMachine_nic(t *testing.T) { machineName := fmt.Sprintf("acctest-%d", acctest.RandInt()) - config := testAccTritonMachine_singleNIC(machineName, acctest.RandIntRange(1024, 2048)) + config := testAccTritonMachine_singleNIC(machineName, acctest.RandIntRange(1024, 2048), acctest.RandIntRange(0, 256)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -89,9 +89,10 @@ func TestAccTritonMachine_nic(t *testing.T) { func TestAccTritonMachine_addNIC(t *testing.T) { machineName := fmt.Sprintf("acctest-%d", acctest.RandInt()) vlanNumber := acctest.RandIntRange(1024, 2048) + subnetNumber := acctest.RandIntRange(0, 256) - singleNICConfig := testAccTritonMachine_singleNIC(machineName, vlanNumber) - 
dualNICConfig := testAccTritonMachine_dualNIC(machineName, vlanNumber) + singleNICConfig := testAccTritonMachine_singleNIC(machineName, vlanNumber, subnetNumber) + dualNICConfig := testAccTritonMachine_dualNIC(machineName, vlanNumber, subnetNumber) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -361,7 +362,7 @@ resource "triton_machine" "test" { } } ` -var testAccTritonMachine_singleNIC = func(name string, vlanNumber int) string { +var testAccTritonMachine_singleNIC = func(name string, vlanNumber int, subnetNumber int) string { return fmt.Sprintf(`resource "triton_vlan" "test" { vlan_id = %d name = "%s-vlan" @@ -373,10 +374,10 @@ resource "triton_fabric" "test" { description = "test network" vlan_id = "${triton_vlan.test.vlan_id}" - subnet = "10.10.0.0/24" - gateway = "10.10.0.1" - provision_start_ip = "10.10.0.10" - provision_end_ip = "10.10.0.250" + subnet = "10.%d.0.0/24" + gateway = "10.%d.0.1" + provision_start_ip = "10.%d.0.10" + provision_end_ip = "10.%d.0.250" resolvers = ["8.8.8.8", "8.8.4.4"] } @@ -393,10 +394,10 @@ resource "triton_machine" "test" { nic { network = "${triton_fabric.test.id}" } -}`, vlanNumber, name, name, name) +}`, vlanNumber, name, name, subnetNumber, subnetNumber, subnetNumber, subnetNumber, name) } -var testAccTritonMachine_dualNIC = func(name string, vlanNumber int) string { +var testAccTritonMachine_dualNIC = func(name string, vlanNumber, subnetNumber int) string { return fmt.Sprintf(`resource "triton_vlan" "test" { vlan_id = %d name = "%s-vlan" @@ -408,10 +409,10 @@ resource "triton_fabric" "test" { description = "test network" vlan_id = "${triton_vlan.test.vlan_id}" - subnet = "10.10.0.0/24" - gateway = "10.10.0.1" - provision_start_ip = "10.10.0.10" - provision_end_ip = "10.10.0.250" + subnet = "10.%d.0.0/24" + gateway = "10.%d.0.1" + provision_start_ip = "10.%d.0.10" + provision_end_ip = "10.%d.0.250" resolvers = ["8.8.8.8", "8.8.4.4"] } @@ -421,10 +422,10 @@ resource "triton_fabric" "test_add" { description = "test network 2" vlan_id = "${triton_vlan.test.vlan_id}" - subnet = "172.23.0.0/24" - gateway = "172.23.0.1" - provision_start_ip = "172.23.0.10" - provision_end_ip = "172.23.0.250" + subnet = "172.23.%d.0/24" + gateway = "172.23.%d.1" + provision_start_ip = "172.23.%d.10" + provision_end_ip = "172.23.%d.250" resolvers = ["8.8.8.8", "8.8.4.4"] } @@ -444,7 +445,7 @@ resource "triton_machine" "test" { nic { network = "${triton_fabric.test_add.id}" } -}`, vlanNumber, name, name, name, name) +}`, vlanNumber, name, name, subnetNumber, subnetNumber, subnetNumber, subnetNumber, name, subnetNumber, subnetNumber, subnetNumber, subnetNumber, name) } var testAccTritonMachine_dns = ` From 27e1b28b34e32d70f0fb38b717405d0b27327c63 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 12 Apr 2017 00:11:46 +0100 Subject: [PATCH 065/342] Making use of the Location Schema --- .../azurerm/resource_arm_eventhub_authorization_rule.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule.go b/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule.go index 4b3f1fc81..c231f0bfd 100644 --- a/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule.go +++ b/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule.go @@ -45,11 +45,7 @@ func resourceArmEventHubAuthorizationRule() *schema.Resource { ForceNew: true, }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, + "location": 
locationSchema(),
 
 			"listen": {
 				Type:     schema.TypeBool,

From 01c8b0197cd8fb800bf63772696d635d7fe3a208 Mon Sep 17 00:00:00 2001
From: tombuildsstuff
Date: Wed, 12 Apr 2017 00:25:30 +0100
Subject: [PATCH 066/342] Switching to use the locationSchema for Event Hub
 Consumer Groups

---
 .../azurerm/resource_arm_eventhub_consumer_group.go | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/builtin/providers/azurerm/resource_arm_eventhub_consumer_group.go b/builtin/providers/azurerm/resource_arm_eventhub_consumer_group.go
index 648fc761c..f188c15f5 100644
--- a/builtin/providers/azurerm/resource_arm_eventhub_consumer_group.go
+++ b/builtin/providers/azurerm/resource_arm_eventhub_consumer_group.go
@@ -45,11 +45,7 @@ func resourceArmEventHubConsumerGroup() *schema.Resource {
 				ForceNew: true,
 			},
 
-			"location": {
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: true,
-			},
+			"location": locationSchema(),
 
 			"user_metadata": {
 				Type:     schema.TypeString,

From 0c76579c0cf1d61b82ed62cb5bf38d0da5187697 Mon Sep 17 00:00:00 2001
From: Stephen Weatherford
Date: Wed, 5 Apr 2017 21:32:02 +0000
Subject: [PATCH 067/342] Use Incremental in sample, and Empty available for
 data disks

---
 .../providers/azurerm/r/template_deployment.html.markdown  | 2 +-
 .../docs/providers/azurerm/r/virtual_machine.html.markdown | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown b/website/source/docs/providers/azurerm/r/template_deployment.html.markdown
index 7bfbb821f..d6f7b659b 100644
--- a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown
+++ b/website/source/docs/providers/azurerm/r/template_deployment.html.markdown
@@ -74,7 +74,7 @@ resource "azurerm_template_deployment" "test" {
   }
 DEPLOY
 
-  deployment_mode = "Complete"
+  deployment_mode = "Incremental"
 }
 ```

diff --git a/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown b/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown
index 3beb03727..565a8accb 100644
--- a/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown
+++ b/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown
@@ -176,7 +176,7 @@ resource "azurerm_virtual_machine" "test" {
     name          = "datadisk0"
     vhd_uri       = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/datadisk0.vhd"
     disk_size_gb  = "1023"
-    create_option = "empty"
+    create_option = "Empty"
     lun           = 0
   }
 
@@ -345,7 +345,7 @@ For more information on the different example configurations, please check out t
 * `vhd_uri` - (Optional) Specifies the vhd uri. Changing this forces a new resource to be created. Cannot be used with managed disks.
 * `managed_disk_type` - (Optional) Specifies the type of managed disk to create. Value must be either `Standard_LRS` or `Premium_LRS`. Cannot be used when `vhd_uri` is specified.
 * `managed_disk_id` - (Optional) Specifies an existing managed disk to use by id. Can only be used when `create_option` is `Attach`. Cannot be used when `vhd_uri` is specified.
-* `create_option` - (Required) Specifies how the virtual machine should be created. Possible values are `attach` and `FromImage`.
+* `create_option` - (Required) Specifies how the virtual machine should be created. Possible values are `Attach` and `FromImage`.
 * `caching` - (Optional) Specifies the caching requirements.
 * `image_uri` - (Optional) Specifies the image_uri in the form publisherName:offer:skus:version. `image_uri` can also specify the [VHD uri](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-linux-cli-deploy-templates/#create-a-custom-vm-image) of a custom VM image to clone. When cloning a custom disk image, the `os_type` documented below becomes required.
 * `os_type` - (Optional) Specifies the operating system type; valid values are `windows` and `linux`.
@@ -357,7 +357,7 @@ For more information on the different example configurations, please check out t
 * `vhd_uri` - (Optional) Specifies the uri of the location in storage where the vhd for the virtual machine should be placed. Cannot be used with managed disks.
 * `managed_disk_type` - (Optional) Specifies the type of managed disk to create. Value must be either `Standard_LRS` or `Premium_LRS`. Cannot be used when `vhd_uri` is specified.
 * `managed_disk_id` - (Optional) Specifies an existing managed disk to use by id. Can only be used when `create_option` is `Attach`. Cannot be used when `vhd_uri` is specified.
-* `create_option` - (Required) Specifies how the data disk should be created.
+* `create_option` - (Required) Specifies how the data disk should be created. Possible values are `Attach`, `FromImage` and `Empty`.
 * `disk_size_gb` - (Required) Specifies the size of the data disk in gigabytes.
 * `caching` - (Optional) Specifies the caching requirements.
 * `lun` - (Required) Specifies the logical unit number of the data disk.

From ae2a05e29f5a8f955d31b1c2eaaaadc991e0a1ba Mon Sep 17 00:00:00 2001
From: Stephen Weatherford
Date: Mon, 10 Apr 2017 21:47:31 +0000
Subject: [PATCH 068/342] Note behavior of destroying deployment template in
 docs

---
 .../providers/azurerm/r/template_deployment.html.markdown | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown b/website/source/docs/providers/azurerm/r/template_deployment.html.markdown
index d6f7b659b..298286b43 100644
--- a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown
+++ b/website/source/docs/providers/azurerm/r/template_deployment.html.markdown
@@ -97,4 +97,8 @@ The following arguments are supported:
 
 The following attributes are exported:
 
-* `id` - The Template Deployment ID.
\ No newline at end of file
+* `id` - The Template Deployment ID.
+
+## Note
+
+Terraform does not know about the individual resources created by Azure using a deployment template and therefore cannot delete these resources during a destroy. Destroying a template deployment removes the associated deployment operations, but will not delete the Azure resources created by the deployment. In order to delete these resources, the containing resource group must also be destroyed. [More information](https://docs.microsoft.com/en-us/rest/api/resources/deployments#Deployments_Delete).
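A minimal sketch of the pattern the note above implies, assuming hypothetical resource names and a deliberately empty ARM template: since destroying the deployment alone does not remove what it created, scoping the deployment to its own resource group makes destroying that group the cleanup path.

```hcl
resource "azurerm_resource_group" "example" {
  name     = "example-deployment-rg"
  location = "West US"
}

resource "azurerm_template_deployment" "example" {
  name                = "example-deployment"
  resource_group_name = "${azurerm_resource_group.example.name}"
  deployment_mode     = "Incremental"

  # Empty template for illustration only; in practice this would declare
  # the Azure resources. Destroying azurerm_resource_group.example (not
  # this resource) is what actually removes those resources.
  template_body = <<DEPLOY
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "resources": []
}
DEPLOY
}
```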
From 1cf0cd87cb34207cb129be3d888149b449dd1bfb Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 12 Apr 2017 08:33:26 +0100 Subject: [PATCH 069/342] provider/kubernetes: Add support for persistent_volume_claim (#13527) --- builtin/providers/kubernetes/provider.go | 9 +- ...urce_kubernetes_persistent_volume_claim.go | 266 +++++++ ...kubernetes_persistent_volume_claim_test.go | 667 ++++++++++++++++++ .../structure_persistent_volume_claim.go | 134 ++++ .../r/persistent_volume_claim.html.markdown | 114 +++ website/source/layouts/kubernetes.erb | 3 + 6 files changed, 1189 insertions(+), 4 deletions(-) create mode 100644 builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim.go create mode 100644 builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim_test.go create mode 100644 builtin/providers/kubernetes/structure_persistent_volume_claim.go create mode 100644 website/source/docs/providers/kubernetes/r/persistent_volume_claim.html.markdown diff --git a/builtin/providers/kubernetes/provider.go b/builtin/providers/kubernetes/provider.go index 61c18e80a..8a613ab13 100644 --- a/builtin/providers/kubernetes/provider.go +++ b/builtin/providers/kubernetes/provider.go @@ -86,10 +86,11 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "kubernetes_config_map": resourceKubernetesConfigMap(), - "kubernetes_namespace": resourceKubernetesNamespace(), - "kubernetes_persistent_volume": resourceKubernetesPersistentVolume(), - "kubernetes_secret": resourceKubernetesSecret(), + "kubernetes_config_map": resourceKubernetesConfigMap(), + "kubernetes_namespace": resourceKubernetesNamespace(), + "kubernetes_persistent_volume": resourceKubernetesPersistentVolume(), + "kubernetes_persistent_volume_claim": resourceKubernetesPersistentVolumeClaim(), + "kubernetes_secret": resourceKubernetesSecret(), }, ConfigureFunc: providerConfigure, } diff --git a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim.go b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim.go new file mode 100644 index 000000000..ca0150eaa --- /dev/null +++ b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim.go @@ -0,0 +1,266 @@ +package kubernetes + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + pkgApi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + api "k8s.io/kubernetes/pkg/api/v1" + kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" +) + +func resourceKubernetesPersistentVolumeClaim() *schema.Resource { + return &schema.Resource{ + Create: resourceKubernetesPersistentVolumeClaimCreate, + Read: resourceKubernetesPersistentVolumeClaimRead, + Exists: resourceKubernetesPersistentVolumeClaimExists, + Update: resourceKubernetesPersistentVolumeClaimUpdate, + Delete: resourceKubernetesPersistentVolumeClaimDelete, + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("wait_until_bound", true) + return []*schema.ResourceData{d}, nil + }, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "metadata": namespacedMetadataSchema("persistent volume claim", true), + "spec": { + Type: schema.TypeList, + Description: "Spec defines the desired characteristics of a volume requested by a pod author. 
More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims",
+				Required:    true,
+				ForceNew:    true,
+				MaxItems:    1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"access_modes": {
+							Type:        schema.TypeSet,
+							Description: "A set of the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1",
+							Required:    true,
+							ForceNew:    true,
+							Elem:        &schema.Schema{Type: schema.TypeString},
+							Set:         schema.HashString,
+						},
+						"resources": {
+							Type:        schema.TypeList,
+							Description: "A list of the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources",
+							Required:    true,
+							ForceNew:    true,
+							MaxItems:    1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"limits": {
+										Type:        schema.TypeMap,
+										Description: "Map describing the maximum amount of compute resources allowed. More info: http://kubernetes.io/docs/user-guide/compute-resources/",
+										Optional:    true,
+										ForceNew:    true,
+									},
+									"requests": {
+										Type:        schema.TypeMap,
+										Description: "Map describing the minimum amount of compute resources required. If this is omitted for a container, it defaults to `limits` if that is explicitly specified, otherwise to an implementation-defined value. More info: http://kubernetes.io/docs/user-guide/compute-resources/",
+										Optional:    true,
+										ForceNew:    true,
+									},
+								},
+							},
+						},
+						"selector": {
+							Type:        schema.TypeList,
+							Description: "A label query over volumes to consider for binding.",
+							Optional:    true,
+							ForceNew:    true,
+							MaxItems:    1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"match_expressions": {
+										Type:        schema.TypeList,
+										Description: "A list of label selector requirements. The requirements are ANDed.",
+										Optional:    true,
+										ForceNew:    true,
+										Elem: &schema.Resource{
+											Schema: map[string]*schema.Schema{
+												"key": {
+													Type:        schema.TypeString,
+													Description: "The label key that the selector applies to.",
+													Optional:    true,
+													ForceNew:    true,
+												},
+												"operator": {
+													Type:        schema.TypeString,
+													Description: "A key's relationship to a set of values. Valid operators are `In`, `NotIn`, `Exists` and `DoesNotExist`.",
+													Optional:    true,
+													ForceNew:    true,
+												},
+												"values": {
+													Type:        schema.TypeSet,
+													Description: "An array of string values. If the operator is `In` or `NotIn`, the values array must be non-empty. If the operator is `Exists` or `DoesNotExist`, the values array must be empty. This array is replaced during a strategic merge patch.",
+													Optional:    true,
+													ForceNew:    true,
+													Elem:        &schema.Schema{Type: schema.TypeString},
+													Set:         schema.HashString,
+												},
+											},
+										},
+									},
+									"match_labels": {
+										Type:        schema.TypeMap,
+										Description: "A map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of `match_expressions`, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". 
The requirements are ANDed.", + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "volume_name": { + Type: schema.TypeString, + Description: "The binding reference to the PersistentVolume backing this claim.", + Optional: true, + ForceNew: true, + Computed: true, + }, + }, + }, + }, + "wait_until_bound": { + Type: schema.TypeBool, + Description: "Whether to wait for the claim to reach `Bound` state (to find volume in which to claim the space)", + Optional: true, + Default: true, + }, + }, + } +} + +func resourceKubernetesPersistentVolumeClaimCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*kubernetes.Clientset) + + metadata := expandMetadata(d.Get("metadata").([]interface{})) + spec, err := expandPersistentVolumeClaimSpec(d.Get("spec").([]interface{})) + if err != nil { + return err + } + + claim := api.PersistentVolumeClaim{ + ObjectMeta: metadata, + Spec: spec, + } + + log.Printf("[INFO] Creating new persistent volume claim: %#v", claim) + out, err := conn.CoreV1().PersistentVolumeClaims(metadata.Namespace).Create(&claim) + if err != nil { + return err + } + log.Printf("[INFO] Submitted new persistent volume claim: %#v", out) + + d.SetId(buildId(out.ObjectMeta)) + name := out.ObjectMeta.Name + + if d.Get("wait_until_bound").(bool) { + stateConf := &resource.StateChangeConf{ + Target: []string{"Bound"}, + Pending: []string{"Pending"}, + Timeout: d.Timeout(schema.TimeoutCreate), + Refresh: func() (interface{}, string, error) { + out, err := conn.CoreV1().PersistentVolumeClaims(metadata.Namespace).Get(name) + if err != nil { + log.Printf("[ERROR] Received error: %#v", err) + return out, "", err + } + + statusPhase := fmt.Sprintf("%v", out.Status.Phase) + log.Printf("[DEBUG] Persistent volume claim %s status received: %#v", out.Name, statusPhase) + return out, statusPhase, nil + }, + } + _, err = stateConf.WaitForState() + if err != nil { + return err + } + } + log.Printf("[INFO] Persistent volume claim %s created", out.Name) + + return resourceKubernetesPersistentVolumeClaimRead(d, meta) +} + +func resourceKubernetesPersistentVolumeClaimRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*kubernetes.Clientset) + + namespace, name := idParts(d.Id()) + log.Printf("[INFO] Reading persistent volume claim %s", name) + claim, err := conn.CoreV1().PersistentVolumeClaims(namespace).Get(name) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + return err + } + log.Printf("[INFO] Received persistent volume claim: %#v", claim) + err = d.Set("metadata", flattenMetadata(claim.ObjectMeta)) + if err != nil { + return err + } + err = d.Set("spec", flattenPersistentVolumeClaimSpec(claim.Spec)) + if err != nil { + return err + } + + return nil +} + +func resourceKubernetesPersistentVolumeClaimUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*kubernetes.Clientset) + namespace, name := idParts(d.Id()) + + ops := patchMetadata("metadata.0.", "/metadata/", d) + // The whole spec is ForceNew = nothing to update there + data, err := ops.MarshalJSON() + if err != nil { + return fmt.Errorf("Failed to marshal update operations: %s", err) + } + + log.Printf("[INFO] Updating persistent volume claim: %s", ops) + out, err := conn.CoreV1().PersistentVolumeClaims(namespace).Patch(name, pkgApi.JSONPatchType, data) + if err != nil { + return err + } + log.Printf("[INFO] Submitted updated persistent volume claim: %#v", out) + + return resourceKubernetesPersistentVolumeClaimRead(d, meta) +} + +func 
resourceKubernetesPersistentVolumeClaimDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*kubernetes.Clientset) + + namespace, name := idParts(d.Id()) + log.Printf("[INFO] Deleting persistent volume claim: %#v", name) + err := conn.CoreV1().PersistentVolumeClaims(namespace).Delete(name, &api.DeleteOptions{}) + if err != nil { + return err + } + + log.Printf("[INFO] Persistent volume claim %s deleted", name) + + d.SetId("") + return nil +} + +func resourceKubernetesPersistentVolumeClaimExists(d *schema.ResourceData, meta interface{}) (bool, error) { + conn := meta.(*kubernetes.Clientset) + + namespace, name := idParts(d.Id()) + log.Printf("[INFO] Checking persistent volume claim %s", name) + _, err := conn.CoreV1().PersistentVolumeClaims(namespace).Get(name) + if err != nil { + if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { + return false, nil + } + log.Printf("[DEBUG] Received error: %#v", err) + } + return true, err +} diff --git a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim_test.go b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim_test.go new file mode 100644 index 000000000..2436212fa --- /dev/null +++ b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim_test.go @@ -0,0 +1,667 @@ +package kubernetes + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + api "k8s.io/kubernetes/pkg/api/v1" + kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" +) + +func TestAccKubernetesPersistentVolumeClaim_basic(t *testing.T) { + var conf api.PersistentVolumeClaim + name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "kubernetes_persistent_volume_claim.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesPersistentVolumeClaimConfig_basic(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &conf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.TestAnnotationOne", "one"), + testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "3"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelOne", "one"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelThree", "three"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelFour", "four"), + testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelThree": "three", "TestLabelFour": "four"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", name), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", 
"metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), + ), + }, + { + Config: testAccKubernetesPersistentVolumeClaimConfig_metaModified(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &conf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "2"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.TestAnnotationOne", "one"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.TestAnnotationTwo", "two"), + testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "TestAnnotationTwo": "two"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "3"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelOne", "one"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelTwo", "two"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelThree", "three"), + testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelTwo": "two", "TestLabelThree": "three"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", name), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), + ), + }, + }, + }) +} + +func TestAccKubernetesPersistentVolumeClaim_importBasic(t *testing.T) { + resourceName := "kubernetes_persistent_volume_claim.test" + volumeName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + claimName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesPersistentVolumeClaimConfig_import(volumeName, claimName), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccKubernetesPersistentVolumeClaim_volumeMatch(t *testing.T) { + var pvcConf api.PersistentVolumeClaim + var pvConf api.PersistentVolume + + claimName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + volumeName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + volumeNameModified := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "kubernetes_persistent_volume_claim.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesPersistentVolumeClaimConfig_volumeMatch(volumeName, claimName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &pvcConf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), + testAccCheckMetaAnnotations(&pvcConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bind-completed": "yes"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), + testAccCheckMetaLabels(&pvcConf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.volume_name", volumeName), + testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &pvConf), + testAccCheckMetaAnnotations(&pvConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bound-by-controller": "yes"}), + ), + }, + { + Config: testAccKubernetesPersistentVolumeClaimConfig_volumeMatch_modified(volumeNameModified, claimName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &pvcConf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), + testAccCheckMetaAnnotations(&pvcConf.ObjectMeta, 
map[string]string{"pv.kubernetes.io/bind-completed": "yes"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), + testAccCheckMetaLabels(&pvcConf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.volume_name", volumeNameModified), + testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test2", &pvConf), + testAccCheckMetaAnnotations(&pvConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bound-by-controller": "yes"}), + ), + }, + }, + }) +} + +func TestAccKubernetesPersistentVolumeClaim_labelsMatch(t *testing.T) { + var conf api.PersistentVolumeClaim + claimName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + volumeName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "kubernetes_persistent_volume_claim.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesPersistentVolumeClaimConfig_labelsMatch(volumeName, claimName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &conf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), + testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"pv.kubernetes.io/bind-completed": "yes", "pv.kubernetes.io/bound-by-controller": "yes"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), + testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), + 
resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_labels.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_labels.TfAccTestEnvironment", "blablah"), + ), + }, + }, + }) +} + +func TestAccKubernetesPersistentVolumeClaim_labelsMatchExpression(t *testing.T) { + var conf api.PersistentVolumeClaim + claimName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + volumeName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "kubernetes_persistent_volume_claim.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesPersistentVolumeClaimConfig_labelsMatchExpression(volumeName, claimName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &conf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), + testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"pv.kubernetes.io/bind-completed": "yes", "pv.kubernetes.io/bound-by-controller": "yes"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), + testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.0.key", 
"TfAccTestEnvironment"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.0.operator", "In"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.0.values.#", "3"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.0.values.1187371253", "three"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.0.values.2053932785", "one"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.0.values.298486374", "two"), + ), + }, + }, + }) +} + +func TestAccKubernetesPersistentVolumeClaim_volumeUpdate(t *testing.T) { + var pvcConf api.PersistentVolumeClaim + var pvConf api.PersistentVolume + + claimName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + volumeName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "kubernetes_persistent_volume_claim.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesPersistentVolumeClaimConfig_volumeUpdate(volumeName, claimName, "5Gi"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &pvcConf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), + testAccCheckMetaAnnotations(&pvcConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bind-completed": "yes"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), + testAccCheckMetaLabels(&pvcConf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.volume_name", volumeName), + testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &pvConf), + testAccCheckMetaAnnotations(&pvConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bound-by-controller": "yes"}), + testAccCheckClaimRef(&pvConf, &ObjectRefStatic{Namespace: "default", Name: claimName}), + ), + }, + { + Config: 
testAccKubernetesPersistentVolumeClaimConfig_volumeUpdate(volumeName, claimName, "10Gi"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &pvcConf), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), + testAccCheckMetaAnnotations(&pvcConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bind-completed": "yes"}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), + testAccCheckMetaLabels(&pvcConf.ObjectMeta, map[string]string{}), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), + resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), + resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.volume_name", volumeName), + testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &pvConf), + testAccCheckMetaAnnotations(&pvConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bound-by-controller": "yes"}), + testAccCheckClaimRef(&pvConf, &ObjectRefStatic{Namespace: "default", Name: claimName}), + ), + }, + }, + }) +} + +func testAccCheckKubernetesPersistentVolumeClaimDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*kubernetes.Clientset) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "kubernetes_persistent_volume_claim" { + continue + } + namespace, name := idParts(rs.Primary.ID) + resp, err := conn.CoreV1().PersistentVolumeClaims(namespace).Get(name) + if err == nil { + if resp.Namespace == namespace && resp.Name == name { + return fmt.Errorf("Persistent Volume still exists: %s", rs.Primary.ID) + } + } + } + + return nil +} + +func testAccCheckKubernetesPersistentVolumeClaimExists(n string, obj *api.PersistentVolumeClaim) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := testAccProvider.Meta().(*kubernetes.Clientset) + namespace, name := idParts(rs.Primary.ID) + out, err := conn.CoreV1().PersistentVolumeClaims(namespace).Get(name) + if err != nil { + return err + } + + *obj = *out + return nil + } +} + +func testAccCheckClaimRef(pv *api.PersistentVolume, expected *ObjectRefStatic) resource.TestCheckFunc { + return func(s *terraform.State) error { + or := pv.Spec.ClaimRef + if or == nil { + return fmt.Errorf("Expected ClaimRef to be not-nil, specifically %#v", *expected) + } + if or.Namespace != 
expected.Namespace { + return fmt.Errorf("Expected object reference %q, given: %q", expected.Namespace, or.Namespace) + } + if or.Name != expected.Name { + return fmt.Errorf("Expected object reference %q, given: %q", expected.Name, or.Name) + } + return nil + } +} + +type ObjectRefStatic struct { + Namespace string + Name string +} + +func testAccKubernetesPersistentVolumeClaimConfig_basic(name string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume_claim" "test" { + metadata { + annotations { + TestAnnotationOne = "one" + } + labels { + TestLabelOne = "one" + TestLabelThree = "three" + TestLabelFour = "four" + } + name = "%s" + } + spec { + access_modes = ["ReadWriteMany"] + resources { + requests { + storage = "5Gi" + } + } + selector { + match_expressions { + key = "environment" + operator = "In" + values = ["non-exists-12345"] + } + } + } + wait_until_bound = false +} +`, name) +} + +func testAccKubernetesPersistentVolumeClaimConfig_metaModified(name string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume_claim" "test" { + metadata { + annotations { + TestAnnotationOne = "one" + TestAnnotationTwo = "two" + } + labels { + TestLabelOne = "one" + TestLabelTwo = "two" + TestLabelThree = "three" + } + name = "%s" + } + spec { + access_modes = ["ReadWriteMany"] + resources { + requests { + storage = "5Gi" + } + } + selector { + match_expressions { + key = "environment" + operator = "In" + values = ["non-exists-12345"] + } + } + } + wait_until_bound = false +} +`, name) +} + +func testAccKubernetesPersistentVolumeClaimConfig_import(volumeName, claimName string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume" "test" { + metadata { + name = "%s" + } + spec { + capacity { + storage = "10Gi" + } + access_modes = ["ReadWriteMany"] + persistent_volume_source { + gce_persistent_disk { + pd_name = "test123" + } + } + } +} + +resource "kubernetes_persistent_volume_claim" "test" { + metadata { + name = "%s" + } + spec { + access_modes = ["ReadWriteMany"] + resources { + requests { + storage = "5Gi" + } + } + volume_name = "${kubernetes_persistent_volume.test.metadata.0.name}" + } +} +`, volumeName, claimName) +} + +func testAccKubernetesPersistentVolumeClaimConfig_volumeMatch(volumeName, claimName string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume" "test" { + metadata { + name = "%s" + } + spec { + capacity { + storage = "10Gi" + } + access_modes = ["ReadWriteMany"] + persistent_volume_source { + gce_persistent_disk { + pd_name = "test123" + } + } + } +} + +resource "kubernetes_persistent_volume_claim" "test" { + metadata { + name = "%s" + } + spec { + access_modes = ["ReadWriteMany"] + resources { + requests { + storage = "5Gi" + } + } + volume_name = "${kubernetes_persistent_volume.test.metadata.0.name}" + } +} +`, volumeName, claimName) +} + +func testAccKubernetesPersistentVolumeClaimConfig_volumeMatch_modified(volumeName, claimName string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume" "test2" { + metadata { + name = "%s" + } + spec { + capacity { + storage = "10Gi" + } + access_modes = ["ReadWriteMany"] + persistent_volume_source { + gce_persistent_disk { + pd_name = "test123" + } + } + } +} + +resource "kubernetes_persistent_volume_claim" "test" { + metadata { + name = "%s" + } + spec { + access_modes = ["ReadWriteMany"] + resources { + requests { + storage = "5Gi" + } + } + volume_name = "${kubernetes_persistent_volume.test2.metadata.0.name}" + } +} +`, volumeName, 
claimName) +} + +func testAccKubernetesPersistentVolumeClaimConfig_labelsMatch(volumeName, claimName string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume" "test" { + metadata { + labels { + TfAccTestEnvironment = "blablah" + } + name = "%s" + } + spec { + capacity { + storage = "10Gi" + } + access_modes = ["ReadWriteMany"] + persistent_volume_source { + gce_persistent_disk { + pd_name = "test123" + } + } + } +} + +resource "kubernetes_persistent_volume_claim" "test" { + metadata { + name = "%s" + } + spec { + access_modes = ["ReadWriteMany"] + resources { + requests { + storage = "5Gi" + } + } + selector { + match_labels { + TfAccTestEnvironment = "blablah" + } + } + } +} +`, volumeName, claimName) +} + +func testAccKubernetesPersistentVolumeClaimConfig_labelsMatchExpression(volumeName, claimName string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume" "test" { + metadata { + labels { + TfAccTestEnvironment = "two" + } + name = "%s" + } + spec { + capacity { + storage = "10Gi" + } + access_modes = ["ReadWriteMany"] + persistent_volume_source { + gce_persistent_disk { + pd_name = "test123" + } + } + } +} + +resource "kubernetes_persistent_volume_claim" "test" { + metadata { + name = "%s" + } + spec { + access_modes = ["ReadWriteMany"] + resources { + requests { + storage = "5Gi" + } + } + selector { + match_expressions { + key = "TfAccTestEnvironment" + operator = "In" + values = ["one", "three", "two"] + } + } + } +} +`, volumeName, claimName) +} + +func testAccKubernetesPersistentVolumeClaimConfig_volumeUpdate(volumeName, claimName, storage string) string { + return fmt.Sprintf(` +resource "kubernetes_persistent_volume" "test" { + metadata { + name = "%s" + } + spec { + capacity { + storage = "%s" + } + access_modes = ["ReadWriteMany"] + persistent_volume_source { + gce_persistent_disk { + pd_name = "test123" + } + } + } +} + +resource "kubernetes_persistent_volume_claim" "test" { + metadata { + name = "%s" + } + spec { + access_modes = ["ReadWriteMany"] + resources { + requests { + storage = "5Gi" + } + } + volume_name = "${kubernetes_persistent_volume.test.metadata.0.name}" + } +} +`, volumeName, storage, claimName) +} diff --git a/builtin/providers/kubernetes/structure_persistent_volume_claim.go b/builtin/providers/kubernetes/structure_persistent_volume_claim.go new file mode 100644 index 000000000..20fdbb0de --- /dev/null +++ b/builtin/providers/kubernetes/structure_persistent_volume_claim.go @@ -0,0 +1,134 @@ +package kubernetes + +import ( + "github.com/hashicorp/terraform/helper/schema" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" +) + +// Flatteners + +func flattenLabelSelector(in *unversioned.LabelSelector) []interface{} { + att := make(map[string]interface{}) + if len(in.MatchLabels) > 0 { + att["match_labels"] = in.MatchLabels + } + if len(in.MatchExpressions) > 0 { + att["match_expressions"] = flattenLabelSelectorRequirement(in.MatchExpressions) + } + return []interface{}{att} +} + +func flattenLabelSelectorRequirement(in []unversioned.LabelSelectorRequirement) []interface{} { + att := make([]interface{}, len(in), len(in)) + for i, n := range in { + m := make(map[string]interface{}) + m["key"] = n.Key + m["operator"] = n.Operator + m["values"] = newStringSet(schema.HashString, n.Values) + att[i] = m + } + return att +} + +func flattenPersistentVolumeClaimSpec(in v1.PersistentVolumeClaimSpec) []interface{} { + att := make(map[string]interface{}) + att["access_modes"] = 
flattenPersistentVolumeAccessModes(in.AccessModes) + att["resources"] = flattenResourceRequirements(in.Resources) + if in.Selector != nil { + att["selector"] = flattenLabelSelector(in.Selector) + } + if in.VolumeName != "" { + att["volume_name"] = in.VolumeName + } + return []interface{}{att} +} + +func flattenResourceRequirements(in v1.ResourceRequirements) []interface{} { + att := make(map[string]interface{}) + if len(in.Limits) > 0 { + att["limits"] = flattenResourceList(in.Limits) + } + if len(in.Requests) > 0 { + att["requests"] = flattenResourceList(in.Requests) + } + return []interface{}{att} +} + +// Expanders + +func expandLabelSelector(l []interface{}) *unversioned.LabelSelector { + if len(l) == 0 || l[0] == nil { + return &unversioned.LabelSelector{} + } + in := l[0].(map[string]interface{}) + obj := &unversioned.LabelSelector{} + if v, ok := in["match_labels"].(map[string]interface{}); ok && len(v) > 0 { + obj.MatchLabels = expandStringMap(v) + } + if v, ok := in["match_expressions"].([]interface{}); ok && len(v) > 0 { + obj.MatchExpressions = expandLabelSelectorRequirement(v) + } + return obj +} + +func expandLabelSelectorRequirement(l []interface{}) []unversioned.LabelSelectorRequirement { + if len(l) == 0 || l[0] == nil { + return []unversioned.LabelSelectorRequirement{} + } + obj := make([]unversioned.LabelSelectorRequirement, len(l), len(l)) + for i, n := range l { + in := n.(map[string]interface{}) + obj[i] = unversioned.LabelSelectorRequirement{ + Key: in["key"].(string), + Operator: unversioned.LabelSelectorOperator(in["operator"].(string)), + Values: sliceOfString(in["values"].(*schema.Set).List()), + } + } + return obj +} + +func expandPersistentVolumeClaimSpec(l []interface{}) (v1.PersistentVolumeClaimSpec, error) { + if len(l) == 0 || l[0] == nil { + return v1.PersistentVolumeClaimSpec{}, nil + } + in := l[0].(map[string]interface{}) + resourceRequirements, err := expandResourceRequirements(in["resources"].([]interface{})) + if err != nil { + return v1.PersistentVolumeClaimSpec{}, err + } + obj := v1.PersistentVolumeClaimSpec{ + AccessModes: expandPersistentVolumeAccessModes(in["access_modes"].(*schema.Set).List()), + Resources: resourceRequirements, + } + if v, ok := in["selector"].([]interface{}); ok && len(v) > 0 { + obj.Selector = expandLabelSelector(v) + } + if v, ok := in["volume_name"].(string); ok { + obj.VolumeName = v + } + return obj, nil +} + +func expandResourceRequirements(l []interface{}) (v1.ResourceRequirements, error) { + if len(l) == 0 || l[0] == nil { + return v1.ResourceRequirements{}, nil + } + in := l[0].(map[string]interface{}) + obj := v1.ResourceRequirements{} + if v, ok := in["limits"].(map[string]interface{}); ok && len(v) > 0 { + var err error + obj.Limits, err = expandMapToResourceList(v) + if err != nil { + return obj, err + } + } + if v, ok := in["requests"].(map[string]interface{}); ok && len(v) > 0 { + var err error + obj.Requests, err = expandMapToResourceList(v) + if err != nil { + return obj, err + } + } + return obj, nil +} diff --git a/website/source/docs/providers/kubernetes/r/persistent_volume_claim.html.markdown b/website/source/docs/providers/kubernetes/r/persistent_volume_claim.html.markdown new file mode 100644 index 000000000..6e5b656b9 --- /dev/null +++ b/website/source/docs/providers/kubernetes/r/persistent_volume_claim.html.markdown @@ -0,0 +1,114 @@ +--- +layout: "kubernetes" +page_title: "Kubernetes: kubernetes_persistent_volume_claim" +sidebar_current: "docs-kubernetes-resource-persistent-volume-claim" 
+description: |-
+  This resource allows the user to request and claim a persistent volume.
+---
+
+# kubernetes_persistent_volume_claim
+
+This resource allows the user to request and claim a persistent volume.
+
+## Example Usage
+
+```hcl
+resource "kubernetes_persistent_volume_claim" "example" {
+  metadata {
+    name = "exampleclaimname"
+  }
+  spec {
+    access_modes = ["ReadWriteMany"]
+    resources {
+      requests {
+        storage = "5Gi"
+      }
+    }
+    volume_name = "${kubernetes_persistent_volume.example.metadata.0.name}"
+  }
+}
+
+resource "kubernetes_persistent_volume" "example" {
+  metadata {
+    name = "examplevolumename"
+  }
+  spec {
+    capacity {
+      storage = "10Gi"
+    }
+    access_modes = ["ReadWriteMany"]
+    persistent_volume_source {
+      gce_persistent_disk {
+        pd_name = "test-123"
+      }
+    }
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `metadata` - (Required) Standard persistent volume claim's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
+* `spec` - (Required) Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims
+* `wait_until_bound` - (Optional) Whether to wait for the claim to reach the `Bound` state (i.e. until a matching volume has been found and bound to the claim)
+
+## Nested Blocks
+
+### `metadata`
+
+#### Arguments
+
+* `annotations` - (Optional) An unstructured key value map stored with the persistent volume claim that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations
+* `generate_name` - (Optional) Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency
+* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) the persistent volume claim. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
+* `name` - (Optional) Name of the persistent volume claim, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
+* `namespace` - (Optional) Namespace defines the space within which name of the persistent volume claim must be unique.
+
+#### Attributes
+
+* `generation` - A sequence number representing a specific generation of the desired state.
+* `resource_version` - An opaque value that represents the internal version of this persistent volume claim that can be used by clients to determine when persistent volume claim has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+* `self_link` - A URL representing this persistent volume claim.
+* `uid` - The unique in time and space value for this persistent volume claim. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+
+### `spec`
+
+#### Arguments
+
+* `access_modes` - (Required) A set of the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1
+* `resources` - (Required) A list of the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources
+* `selector` - (Optional) A label query over volumes to consider for binding (see the example below).
+* `volume_name` - (Optional) The binding reference to the PersistentVolume backing this claim.
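+
+For example, a claim can restrict binding to volumes that carry a particular label. This is only a sketch; the resource name, label key and label value below are illustrative:
+
+```hcl
+resource "kubernetes_persistent_volume_claim" "selective" {
+  metadata {
+    name = "selectiveclaim"
+  }
+  spec {
+    access_modes = ["ReadWriteMany"]
+    resources {
+      requests {
+        storage = "5Gi"
+      }
+    }
+    selector {
+      match_labels {
+        environment = "test"
+      }
+    }
+  }
+}
+```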
+
+### `match_expressions`
+
+#### Arguments
+
+* `key` - (Optional) The label key that the selector applies to.
+* `operator` - (Optional) A key's relationship to a set of values. Valid operators are `In`, `NotIn`, `Exists` and `DoesNotExist`.
+* `values` - (Optional) An array of string values. If the operator is `In` or `NotIn`, the values array must be non-empty. If the operator is `Exists` or `DoesNotExist`, the values array must be empty. This array is replaced during a strategic merge patch.
+
+### `resources`
+
+#### Arguments
+
+* `limits` - (Optional) Map describing the maximum amount of compute resources allowed. More info: http://kubernetes.io/docs/user-guide/compute-resources/
+* `requests` - (Optional) Map describing the minimum amount of compute resources required. If this is omitted for a container, it defaults to `limits` if that is explicitly specified, otherwise to an implementation-defined value. More info: http://kubernetes.io/docs/user-guide/compute-resources/
+
+### `selector`
+
+#### Arguments
+
+* `match_expressions` - (Optional) A list of label selector requirements. The requirements are ANDed.
+* `match_labels` - (Optional) A map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of `match_expressions`, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+
+## Import
+
+Persistent Volume Claim can be imported using its name, e.g.
+
+```
+$ terraform import kubernetes_persistent_volume_claim.example example-name
+```
diff --git a/website/source/layouts/kubernetes.erb b/website/source/layouts/kubernetes.erb
index 39fcd0815..2dda81b85 100644
--- a/website/source/layouts/kubernetes.erb
+++ b/website/source/layouts/kubernetes.erb
@@ -22,6 +22,9 @@
   >
     kubernetes_persistent_volume
   
+  >
+    kubernetes_persistent_volume_claim
+  
   >
     kubernetes_secret
   

From bdc6d092b6d6ec3d74001638652b8da1da88658e Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Wed, 12 Apr 2017 08:34:12 +0100
Subject: [PATCH 070/342] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2d577a1e2..e629ebc40 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ FEATURES:
  * **New Resource:** `aws_ses_domain_identity` [GH-13098]
  * **New Resource:** `azurerm_managed_disk` [GH-12455]
  * **New Resource:** `kubernetes_persistent_volume` [GH-13277]
+ * **New Resource:** `kubernetes_persistent_volume_claim` [GH-13527]
  * **New Resource:** `kubernetes_secret` [GH-12960]
  * **New Data Source:** `aws_iam_role` [GH-13213]

From 5f02c0b61a0f95bbe60a376227c8aaa60765fba0 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Wed, 12 Apr 2017 08:35:00 +0100
Subject: [PATCH 071/342] provider/aws: Add support for api_gateway_stage (#13540)

---
 builtin/providers/aws/provider.go          |   1 +
 .../aws/resource_aws_api_gateway_stage.go  | 342 ++++++++++++++++++
 .../resource_aws_api_gateway_stage_test.go | 196 ++++++++++
 .../aws/r/api_gateway_stage.html.markdown  |  78 ++++
 website/source/layouts/aws.erb             |   3 +
 5 files changed, 620 insertions(+)
 create mode 100644 builtin/providers/aws/resource_aws_api_gateway_stage.go
 create mode 100644 builtin/providers/aws/resource_aws_api_gateway_stage_test.go
 create mode 100644
website/source/docs/providers/aws/r/api_gateway_stage.html.markdown diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 67e813a9e..9a2f7e5ff 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -221,6 +221,7 @@ func Provider() terraform.ResourceProvider { "aws_api_gateway_model": resourceAwsApiGatewayModel(), "aws_api_gateway_resource": resourceAwsApiGatewayResource(), "aws_api_gateway_rest_api": resourceAwsApiGatewayRestApi(), + "aws_api_gateway_stage": resourceAwsApiGatewayStage(), "aws_api_gateway_usage_plan": resourceAwsApiGatewayUsagePlan(), "aws_api_gateway_usage_plan_key": resourceAwsApiGatewayUsagePlanKey(), "aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(), diff --git a/builtin/providers/aws/resource_aws_api_gateway_stage.go b/builtin/providers/aws/resource_aws_api_gateway_stage.go new file mode 100644 index 000000000..1b8579e3d --- /dev/null +++ b/builtin/providers/aws/resource_aws_api_gateway_stage.go @@ -0,0 +1,342 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayStage() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayStageCreate, + Read: resourceAwsApiGatewayStageRead, + Update: resourceAwsApiGatewayStageUpdate, + Delete: resourceAwsApiGatewayStageDelete, + + Schema: map[string]*schema.Schema{ + "cache_cluster_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "cache_cluster_size": { + Type: schema.TypeString, + Optional: true, + }, + "client_certificate_id": { + Type: schema.TypeString, + Optional: true, + }, + "deployment_id": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "documentation_version": { + Type: schema.TypeString, + Optional: true, + }, + "rest_api_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "stage_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "variables": { + Type: schema.TypeMap, + Optional: true, + }, + }, + } +} + +func resourceAwsApiGatewayStageCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + d.Partial(true) + + input := apigateway.CreateStageInput{ + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StageName: aws.String(d.Get("stage_name").(string)), + DeploymentId: aws.String(d.Get("deployment_id").(string)), + } + + waitForCache := false + if v, ok := d.GetOk("cache_cluster_enabled"); ok { + input.CacheClusterEnabled = aws.Bool(v.(bool)) + waitForCache = true + } + if v, ok := d.GetOk("cache_cluster_size"); ok { + input.CacheClusterSize = aws.String(v.(string)) + waitForCache = true + } + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + if v, ok := d.GetOk("documentation_version"); ok { + input.DocumentationVersion = aws.String(v.(string)) + } + if vars, ok := d.GetOk("variables"); ok { + variables := make(map[string]string, 0) + for k, v := range vars.(map[string]interface{}) { + variables[k] = v.(string) + } + input.Variables = aws.StringMap(variables) + } + + out, err := conn.CreateStage(&input) + if err != nil { + return fmt.Errorf("Error creating API Gateway Stage: %s", err) + } + + d.SetId(fmt.Sprintf("ags-%s-%s", 
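+		// NB: stages have no standalone identifier in the API Gateway API,
+		// so the resource ID is synthesised from the REST API ID and the stage name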
d.Get("rest_api_id").(string), d.Get("stage_name").(string))) + + d.SetPartial("rest_api_id") + d.SetPartial("stage_name") + d.SetPartial("deployment_id") + d.SetPartial("description") + d.SetPartial("variables") + + if waitForCache && *out.CacheClusterStatus != "NOT_AVAILABLE" { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + "CREATE_IN_PROGRESS", + "DELETE_IN_PROGRESS", + "FLUSH_IN_PROGRESS", + }, + Target: []string{"AVAILABLE"}, + Refresh: apiGatewayStageCacheRefreshFunc(conn, + d.Get("rest_api_id").(string), + d.Get("stage_name").(string)), + Timeout: 90 * time.Minute, + } + + _, err := stateConf.WaitForState() + if err != nil { + return err + } + } + + d.SetPartial("cache_cluster_enabled") + d.SetPartial("cache_cluster_size") + d.Partial(false) + + if _, ok := d.GetOk("client_certificate_id"); ok { + return resourceAwsApiGatewayStageUpdate(d, meta) + } + return resourceAwsApiGatewayStageRead(d, meta) +} + +func resourceAwsApiGatewayStageRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Reading API Gateway Stage %s", d.Id()) + input := apigateway.GetStageInput{ + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StageName: aws.String(d.Get("stage_name").(string)), + } + stage, err := conn.GetStage(&input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + log.Printf("[WARN] API Gateway Stage %s not found, removing", d.Id()) + d.SetId("") + return nil + } + return err + } + log.Printf("[DEBUG] Received API Gateway Stage: %s", stage) + + d.Set("client_certificate_id", stage.ClientCertificateId) + + if stage.CacheClusterStatus != nil && *stage.CacheClusterStatus == "DELETE_IN_PROGRESS" { + d.Set("cache_cluster_enabled", false) + d.Set("cache_cluster_size", nil) + } else { + d.Set("cache_cluster_enabled", stage.CacheClusterEnabled) + d.Set("cache_cluster_size", stage.CacheClusterSize) + } + + d.Set("deployment_id", stage.DeploymentId) + d.Set("description", stage.Description) + d.Set("documentation_version", stage.DocumentationVersion) + d.Set("variables", aws.StringValueMap(stage.Variables)) + + return nil +} + +func resourceAwsApiGatewayStageUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + d.Partial(true) + operations := make([]*apigateway.PatchOperation, 0) + waitForCache := false + if d.HasChange("cache_cluster_enabled") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/cacheClusterEnabled"), + Value: aws.String(fmt.Sprintf("%t", d.Get("cache_cluster_enabled").(bool))), + }) + waitForCache = true + } + if d.HasChange("cache_cluster_size") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/cacheClusterSize"), + Value: aws.String(d.Get("cache_cluster_size").(string)), + }) + waitForCache = true + } + if d.HasChange("client_certificate_id") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/clientCertificateId"), + Value: aws.String(d.Get("client_certificate_id").(string)), + }) + } + if d.HasChange("deployment_id") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/deploymentId"), + Value: aws.String(d.Get("deployment_id").(string)), + }) + } + if d.HasChange("description") { + operations = append(operations, &apigateway.PatchOperation{ + Op: 
aws.String("replace"), + Path: aws.String("/description"), + Value: aws.String(d.Get("description").(string)), + }) + } + if d.HasChange("documentation_version") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/documentationVersion"), + Value: aws.String(d.Get("documentation_version").(string)), + }) + } + if d.HasChange("variables") { + o, n := d.GetChange("variables") + oldV := o.(map[string]interface{}) + newV := n.(map[string]interface{}) + operations = append(operations, diffVariablesOps("/variables/", oldV, newV)...) + } + + input := apigateway.UpdateStageInput{ + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StageName: aws.String(d.Get("stage_name").(string)), + PatchOperations: operations, + } + log.Printf("[DEBUG] Updating API Gateway Stage: %s", input) + out, err := conn.UpdateStage(&input) + if err != nil { + return fmt.Errorf("Updating API Gateway Stage failed: %s", err) + } + + d.SetPartial("client_certificate_id") + d.SetPartial("deployment_id") + d.SetPartial("description") + d.SetPartial("variables") + + if waitForCache && *out.CacheClusterStatus != "NOT_AVAILABLE" { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + "CREATE_IN_PROGRESS", + "FLUSH_IN_PROGRESS", + }, + Target: []string{ + "AVAILABLE", + // There's an AWS API bug (raised & confirmed in Sep 2016 by support) + // which causes the stage to remain in deletion state forever + "DELETE_IN_PROGRESS", + }, + Refresh: apiGatewayStageCacheRefreshFunc(conn, + d.Get("rest_api_id").(string), + d.Get("stage_name").(string)), + Timeout: 30 * time.Minute, + } + + _, err := stateConf.WaitForState() + if err != nil { + return err + } + } + + d.SetPartial("cache_cluster_enabled") + d.SetPartial("cache_cluster_size") + d.Partial(false) + + return resourceAwsApiGatewayStageRead(d, meta) +} + +func diffVariablesOps(prefix string, oldVars, newVars map[string]interface{}) []*apigateway.PatchOperation { + ops := make([]*apigateway.PatchOperation, 0) + + for k, _ := range oldVars { + if _, ok := newVars[k]; !ok { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String(prefix + k), + }) + } + } + + for k, v := range newVars { + newValue := v.(string) + + if oldV, ok := oldVars[k]; ok { + oldValue := oldV.(string) + if oldValue == newValue { + continue + } + } + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + k), + Value: aws.String(newValue), + }) + } + + return ops +} + +func apiGatewayStageCacheRefreshFunc(conn *apigateway.APIGateway, apiId, stageName string) func() (interface{}, string, error) { + return func() (interface{}, string, error) { + input := apigateway.GetStageInput{ + RestApiId: aws.String(apiId), + StageName: aws.String(stageName), + } + out, err := conn.GetStage(&input) + if err != nil { + return 42, "", err + } + + return out, *out.CacheClusterStatus, nil + } +} + +func resourceAwsApiGatewayStageDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Deleting API Gateway Stage: %s", d.Id()) + input := apigateway.DeleteStageInput{ + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StageName: aws.String(d.Get("stage_name").(string)), + } + _, err := conn.DeleteStage(&input) + if err != nil { + return fmt.Errorf("Deleting API Gateway Stage failed: %s", err) + } + + return nil +} diff --git a/builtin/providers/aws/resource_aws_api_gateway_stage_test.go 
b/builtin/providers/aws/resource_aws_api_gateway_stage_test.go new file mode 100644 index 000000000..c64ac1c6d --- /dev/null +++ b/builtin/providers/aws/resource_aws_api_gateway_stage_test.go @@ -0,0 +1,196 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSAPIGatewayStage_basic(t *testing.T) { + var conf apigateway.Stage + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayStageDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSAPIGatewayStageConfig_basic(), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayStageExists("aws_api_gateway_stage.test", &conf), + resource.TestCheckResourceAttr("aws_api_gateway_stage.test", "stage_name", "prod"), + resource.TestCheckResourceAttr("aws_api_gateway_stage.test", "cache_cluster_enabled", "true"), + resource.TestCheckResourceAttr("aws_api_gateway_stage.test", "cache_cluster_size", "0.5"), + ), + }, + resource.TestStep{ + Config: testAccAWSAPIGatewayStageConfig_updated(), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayStageExists("aws_api_gateway_stage.test", &conf), + resource.TestCheckResourceAttr("aws_api_gateway_stage.test", "stage_name", "prod"), + resource.TestCheckResourceAttr("aws_api_gateway_stage.test", "cache_cluster_enabled", "false"), + ), + }, + resource.TestStep{ + Config: testAccAWSAPIGatewayStageConfig_basic(), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayStageExists("aws_api_gateway_stage.test", &conf), + resource.TestCheckResourceAttr("aws_api_gateway_stage.test", "stage_name", "prod"), + resource.TestCheckResourceAttr("aws_api_gateway_stage.test", "cache_cluster_enabled", "true"), + resource.TestCheckResourceAttr("aws_api_gateway_stage.test", "cache_cluster_size", "0.5"), + ), + }, + }, + }) +} + +func testAccCheckAWSAPIGatewayStageExists(n string, res *apigateway.Stage) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No API Gateway Stage ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).apigateway + + req := &apigateway.GetStageInput{ + RestApiId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID), + StageName: aws.String(rs.Primary.Attributes["stage_name"]), + } + out, err := conn.GetStage(req) + if err != nil { + return err + } + + *res = *out + + return nil + } +} + +func testAccCheckAWSAPIGatewayStageDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).apigateway + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_api_gateway_stage" { + continue + } + + req := &apigateway.GetStageInput{ + RestApiId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID), + StageName: aws.String(rs.Primary.Attributes["stage_name"]), + } + out, err := conn.GetStage(req) + if err == nil { + return fmt.Errorf("API Gateway Stage still exists: %s", out) + } + + awsErr, ok := err.(awserr.Error) + if !ok { + return err + } + if awsErr.Code() != "NotFoundException" { + return err + } + + return nil + } + + return nil +} + +const 
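+// Shared acceptance-test fixture: a minimal REST API with a single GET method,
+// an HTTP integration and a "dev" deployment for the stage under test to point at.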
testAccAWSAPIGatewayStageConfig_base = ` +resource "aws_api_gateway_rest_api" "test" { + name = "tf-acc-test" +} + +resource "aws_api_gateway_resource" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" + path_part = "test" +} + +resource "aws_api_gateway_method" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "GET" + authorization = "NONE" +} + +resource "aws_api_gateway_method_response" "error" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "${aws_api_gateway_method.test.http_method}" + status_code = "400" +} + +resource "aws_api_gateway_integration" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "${aws_api_gateway_method.test.http_method}" + + type = "HTTP" + uri = "https://www.google.co.uk" + integration_http_method = "GET" +} + +resource "aws_api_gateway_integration_response" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "${aws_api_gateway_integration.test.http_method}" + status_code = "${aws_api_gateway_method_response.error.status_code}" +} + +resource "aws_api_gateway_deployment" "dev" { + depends_on = ["aws_api_gateway_integration.test"] + + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + stage_name = "dev" + description = "This is a dev env" + + variables = { + "a" = "2" + } +} +` + +func testAccAWSAPIGatewayStageConfig_basic() string { + return testAccAWSAPIGatewayStageConfig_base + ` +resource "aws_api_gateway_stage" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + stage_name = "prod" + deployment_id = "${aws_api_gateway_deployment.dev.id}" + cache_cluster_enabled = true + cache_cluster_size = "0.5" + variables { + one = "1" + two = "2" + } +} +` +} + +func testAccAWSAPIGatewayStageConfig_updated() string { + return testAccAWSAPIGatewayStageConfig_base + ` +resource "aws_api_gateway_stage" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + stage_name = "prod" + deployment_id = "${aws_api_gateway_deployment.dev.id}" + cache_cluster_enabled = false + description = "Hello world" + variables { + one = "1" + three = "3" + } +} +` +} diff --git a/website/source/docs/providers/aws/r/api_gateway_stage.html.markdown b/website/source/docs/providers/aws/r/api_gateway_stage.html.markdown new file mode 100644 index 000000000..155837a2b --- /dev/null +++ b/website/source/docs/providers/aws/r/api_gateway_stage.html.markdown @@ -0,0 +1,78 @@ +--- +layout: "aws" +page_title: "AWS: aws_api_gateway_stage" +sidebar_current: "docs-aws-resource-api-gateway-stage" +description: |- + Provides an API Gateway Stage. +--- + +# aws\_api\_gateway\_stage + +Provides an API Gateway Stage. 
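+
+A stage points at a specific deployment of the API; it can additionally front the API with a cache cluster and carry stage variables. As a minimal sketch (resource names and values here are illustrative; a fuller configuration follows below):
+
+```hcl
+resource "aws_api_gateway_stage" "cached" {
+  rest_api_id           = "${aws_api_gateway_rest_api.test.id}"
+  deployment_id         = "${aws_api_gateway_deployment.test.id}"
+  stage_name            = "prod"
+  cache_cluster_enabled = true
+  cache_cluster_size    = "0.5"
+
+  variables {
+    backend = "v1"
+  }
+}
+```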
+ +## Example Usage + +```hcl +resource "aws_api_gateway_stage" "test" { + stage_name = "prod" + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + deployment_id = "${aws_api_gateway_deployment.test.id}" +} + +resource "aws_api_gateway_rest_api" "test" { + name = "MyDemoAPI" + description = "This is my API for demonstration purposes" +} + +resource "aws_api_gateway_deployment" "test" { + depends_on = ["aws_api_gateway_integration.test"] + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + stage_name = "dev" +} + +resource "aws_api_gateway_resource" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" + path_part = "mytestresource" +} + +resource "aws_api_gateway_method" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "GET" + authorization = "NONE" +} + +resource "aws_api_gateway_method_settings" "s" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + stage_name = "${aws_api_gateway_stage.test.stage_name}" + method_path = "${aws_api_gateway_resource.test.path_part}/${aws_api_gateway_method.test.http_method}" + + settings { + metrics_enabled = true + logging_level = "INFO" + } +} + +resource "aws_api_gateway_integration" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "${aws_api_gateway_method.test.http_method}" + type = "MOCK" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `rest_api_id` - (Required) The ID of the associated REST API +* `stage_name` - (Required) The name of the stage +* `deployment_id` - (Required) The ID of the deployment that the stage points to +* `cache_cluster_enabled` - (Optional) Specifies whether a cache cluster is enabled for the stage +* `cache_cluster_size` - (Optional) The size of the cache cluster for the stage, if enabled. + Allowed values include `0.5`, `1.6`, `6.1`, `13.5`, `28.4`, `58.2`, `118` and `237`. +* `client_certificate_id` - (Optional) The identifier of a client certificate for the stage. 
+* `description` - (Optional) The description of the stage +* `documentation_version` - (Optional) The version of the associated API documentation +* `variables` - (Optional) A map that defines the stage variables diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 716a53411..dba72163b 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -185,6 +185,9 @@ > aws_api_gateway_rest_api + > + aws_api_gateway_stage + > aws_api_gateway_usage_plan From 97067815e2879a631b418151a3edc4a85437a001 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 12 Apr 2017 08:35:50 +0100 Subject: [PATCH 072/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e629ebc40..4da7f1027 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ FEATURES: + * **New Resource:** `aws_api_gateway_stage` [GH-13540] * **New Resource:** `aws_iam_openid_connect_provider` [GH-13456] * **New Resource:** `aws_lightsail_static_ip` [GH-13175] * **New Resource:** `aws_lightsail_static_ip_attachment` [GH-13207] From 70e69a73872bea6e8c4121ff743c2b2c26ae1485 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 12 Apr 2017 11:44:07 +0100 Subject: [PATCH 073/342] Ensuring we output the error when things fail --- .../azurerm/resource_arm_eventhub_authorization_rule.go | 6 +++--- .../azurerm/resource_arm_eventhub_consumer_group.go | 4 ++-- .../providers/azurerm/resource_arm_eventhub_namespace.go | 7 +++---- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule.go b/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule.go index c231f0bfd..f6f081677 100644 --- a/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule.go +++ b/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule.go @@ -144,7 +144,7 @@ func resourceArmEventHubAuthorizationRuleRead(d *schema.ResourceData, meta inter resp, err := client.GetAuthorizationRule(resGroup, namespaceName, eventHubName, name) if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub Authorization Rule %s: %s", name, err) + return fmt.Errorf("Error making Read request on Azure EventHub Authorization Rule %s: %+v", name, err) } if resp.StatusCode == http.StatusNotFound { d.SetId("") @@ -153,7 +153,7 @@ func resourceArmEventHubAuthorizationRuleRead(d *schema.ResourceData, meta inter keysResp, err := client.ListKeys(resGroup, namespaceName, eventHubName, name) if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub Authorization Rule List Keys %s: %s", name, err) + return fmt.Errorf("Error making Read request on Azure EventHub Authorization Rule List Keys %s: %+v", name, err) } d.Set("name", name) @@ -187,7 +187,7 @@ func resourceArmEventHubAuthorizationRuleDelete(d *schema.ResourceData, meta int resp, err := eventhubClient.DeleteAuthorizationRule(resGroup, namespaceName, eventHubName, name) if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Error issuing Azure ARM delete request of EventHub Authorization Rule '%s': %s", name, err) + return fmt.Errorf("Error issuing Azure ARM delete request of EventHub Authorization Rule '%s': %+v", name, err) } return nil diff --git a/builtin/providers/azurerm/resource_arm_eventhub_consumer_group.go b/builtin/providers/azurerm/resource_arm_eventhub_consumer_group.go index f188c15f5..f098ff808 100644 --- a/builtin/providers/azurerm/resource_arm_eventhub_consumer_group.go 
+++ b/builtin/providers/azurerm/resource_arm_eventhub_consumer_group.go @@ -109,7 +109,7 @@ func resourceArmEventHubConsumerGroupRead(d *schema.ResourceData, meta interface resp, err := eventhubClient.Get(resGroup, namespaceName, eventHubName, name) if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub Consumer Group %s: %s", name, err) + return fmt.Errorf("Error making Read request on Azure EventHub Consumer Group %s: %+v", name, err) } if resp.StatusCode == http.StatusNotFound { d.SetId("") @@ -141,7 +141,7 @@ func resourceArmEventHubConsumerGroupDelete(d *schema.ResourceData, meta interfa resp, err := eventhubClient.Delete(resGroup, namespaceName, eventHubName, name) if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Error issuing Azure ARM delete request of EventHub Consumer Group '%s': %s", name, err) + return fmt.Errorf("Error issuing Azure ARM delete request of EventHub Consumer Group '%s': %+v", name, err) } return nil diff --git a/builtin/providers/azurerm/resource_arm_eventhub_namespace.go b/builtin/providers/azurerm/resource_arm_eventhub_namespace.go index e03281e22..d39016799 100644 --- a/builtin/providers/azurerm/resource_arm_eventhub_namespace.go +++ b/builtin/providers/azurerm/resource_arm_eventhub_namespace.go @@ -131,7 +131,7 @@ func resourceArmEventHubNamespaceRead(d *schema.ResourceData, meta interface{}) resp, err := namespaceClient.Get(resGroup, name) if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub Namespace %s: %s", name, err) + return fmt.Errorf("Error making Read request on Azure EventHub Namespace %s: %+v", name, err) } if resp.StatusCode == http.StatusNotFound { d.SetId("") @@ -146,7 +146,7 @@ func resourceArmEventHubNamespaceRead(d *schema.ResourceData, meta interface{}) keys, err := namespaceClient.ListKeys(resGroup, name, eventHubNamespaceDefaultAuthorizationRule) if err != nil { - log.Printf("[ERROR] Unable to List default keys for Namespace %s: %s", name, err) + log.Printf("[ERROR] Unable to List default keys for Namespace %s: %+v", name, err) } else { d.Set("default_primary_connection_string", keys.PrimaryConnectionString) d.Set("default_secondary_connection_string", keys.SecondaryConnectionString) @@ -160,7 +160,6 @@ func resourceArmEventHubNamespaceRead(d *schema.ResourceData, meta interface{}) } func resourceArmEventHubNamespaceDelete(d *schema.ResourceData, meta interface{}) error { - namespaceClient := meta.(*ArmClient).eventHubNamespacesClient id, err := parseAzureResourceID(d.Id()) @@ -173,7 +172,7 @@ func resourceArmEventHubNamespaceDelete(d *schema.ResourceData, meta interface{} resp, err := namespaceClient.Delete(resGroup, name, make(chan struct{})) if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Error issuing Azure ARM delete request of EventHub Namespace'%s': %s", name, err) + return fmt.Errorf("Error issuing Azure ARM delete request of EventHub Namespace '%s': %+v", name, err) } return nil From 8879720c18150faa9c7d9e3d5679dd50615dd517 Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Wed, 12 Apr 2017 12:34:22 +0100 Subject: [PATCH 074/342] Updating the change log to include #13570 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4da7f1027..fa8ba49aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -89,6 +89,7 @@ BUG FIXES: * provider/azurerm: Network Security Group - ignoring protocol casing at Import time [GH-13153] * provider/azurerm: Fix crash when importing Local Network Gateways [GH-13261] * provider/azurerm: 
Defaulting the value of `duplicate_detection_history_time_window` for `azurerm_servicebus_topic` [GH-13223]
+ * provider/azurerm: Event Hubs making the Location field idempotent [GH-13570]
 * provider/bitbucket: Fixed issue where provider would fail with an "EOF" error on some operations [GH-13390]
 * provider/kubernetes: Use PATCH to update namespace [GH-13114]
 * provider/ns1: No splitting answer on SPF records. [GH-13260]

From 1cb1342a2bc569dbef2eb008310be9766db92f72 Mon Sep 17 00:00:00 2001
From: Jake Champlin
Date: Wed, 12 Apr 2017 09:24:42 -0400
Subject: [PATCH 075/342] Clarify test is for panic fix; point to GitHub issue

---
 builtin/providers/aws/resource_aws_route_table_test.go | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/builtin/providers/aws/resource_aws_route_table_test.go b/builtin/providers/aws/resource_aws_route_table_test.go
index 932f33e9d..b4b764d37 100644
--- a/builtin/providers/aws/resource_aws_route_table_test.go
+++ b/builtin/providers/aws/resource_aws_route_table_test.go
@@ -184,7 +184,8 @@ func TestAccAWSRouteTable_tags(t *testing.T) {
 	})
 }
 
-func TestAccAWSRouteTable_panic(t *testing.T) {
+// For GH-13545: fixes panic on an empty route config block
+func TestAccAWSRouteTable_panicEmptyRoute(t *testing.T) {
 	resource.Test(t, resource.TestCase{
 		PreCheck:      func() { testAccPreCheck(t) },
 		IDRefreshName: "aws_route_table.foo",
@@ -192,7 +193,7 @@ func TestAccAWSRouteTable_panic(t *testing.T) {
 		CheckDestroy: testAccCheckRouteTableDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config:      testAccRouteTableConfigPanic,
+				Config:      testAccRouteTableConfigPanicEmptyRoute,
 				ExpectError: regexp.MustCompile("The request must contain the parameter destinationCidrBlock or destinationIpv6CidrBlock"),
 			},
 		},
@@ -514,7 +515,8 @@ resource "aws_route_table" "foo" {
 }
 `
 
-const testAccRouteTableConfigPanic = `
+// For GH-13545
+const testAccRouteTableConfigPanicEmptyRoute = `
 resource "aws_vpc" "foo" {
 	cidr_block = "10.2.0.0/16"
 }

From 3cd125a9011d620e4adc181ac8ae0461d59c5616 Mon Sep 17 00:00:00 2001
From: Jake Champlin
Date: Wed, 12 Apr 2017 10:18:49 -0400
Subject: [PATCH 076/342] provider/aws: Add import to codecommit_repository

Adds import capability to the `aws_codecommit_repository` resource.

Also fixes an issue in the Read function where both the `description` and
`repository_name` attributes weren't being updated and set in the Schema.

Fixes: #13559

```
$ make testacc TEST=./builtin/providers/aws TESTARGS="-run=TestAccAWSCodeCommitRepository"
==> Checking that code complies with gofmt requirements...
go generate $(go list ./...
| grep -v /terraform/vendor/) 2017/04/12 10:14:44 Generated command/internal_plugin_list.go TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSCodeCommitRepository -timeout 120m === RUN TestAccAWSCodeCommitRepository_importBasic --- PASS: TestAccAWSCodeCommitRepository_importBasic (16.11s) === RUN TestAccAWSCodeCommitRepository_basic --- PASS: TestAccAWSCodeCommitRepository_basic (14.97s) === RUN TestAccAWSCodeCommitRepository_withChanges --- PASS: TestAccAWSCodeCommitRepository_withChanges (26.71s) === RUN TestAccAWSCodeCommitRepository_create_default_branch --- PASS: TestAccAWSCodeCommitRepository_create_default_branch (14.34s) === RUN TestAccAWSCodeCommitRepository_create_and_update_default_branch --- PASS: TestAccAWSCodeCommitRepository_create_and_update_default_branch (27.90s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 100.071s ``` --- .../import_aws_codecommit_repository_test.go | 29 +++++++++++++++++++ .../aws/resource_aws_codecommit_repository.go | 19 +++++++----- ...resource_aws_codecommit_repository_test.go | 12 ++++---- 3 files changed, 47 insertions(+), 13 deletions(-) create mode 100644 builtin/providers/aws/import_aws_codecommit_repository_test.go diff --git a/builtin/providers/aws/import_aws_codecommit_repository_test.go b/builtin/providers/aws/import_aws_codecommit_repository_test.go new file mode 100644 index 000000000..ea203c9c1 --- /dev/null +++ b/builtin/providers/aws/import_aws_codecommit_repository_test.go @@ -0,0 +1,29 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSCodeCommitRepository_importBasic(t *testing.T) { + resName := "aws_codecommit_repository.test" + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCodeCommitRepository_basic(rInt), + }, + { + ResourceName: resName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_codecommit_repository.go b/builtin/providers/aws/resource_aws_codecommit_repository.go index c9b2bd2dc..a477c274e 100644 --- a/builtin/providers/aws/resource_aws_codecommit_repository.go +++ b/builtin/providers/aws/resource_aws_codecommit_repository.go @@ -15,9 +15,12 @@ func resourceAwsCodeCommitRepository() *schema.Resource { Update: resourceAwsCodeCommitRepositoryUpdate, Read: resourceAwsCodeCommitRepositoryRead, Delete: resourceAwsCodeCommitRepositoryDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ - "repository_name": &schema.Schema{ + "repository_name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -31,7 +34,7 @@ func resourceAwsCodeCommitRepository() *schema.Resource { }, }, - "description": &schema.Schema{ + "description": { Type: schema.TypeString, Optional: true, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { @@ -44,27 +47,27 @@ func resourceAwsCodeCommitRepository() *schema.Resource { }, }, - "arn": &schema.Schema{ + "arn": { Type: schema.TypeString, Computed: true, }, - "repository_id": &schema.Schema{ + "repository_id": { Type: schema.TypeString, Computed: true, }, - "clone_url_http": &schema.Schema{ + "clone_url_http": { Type: schema.TypeString, Computed: true, }, - "clone_url_ssh": &schema.Schema{ + 
"clone_url_ssh": { Type: schema.TypeString, Computed: true, }, - "default_branch": &schema.Schema{ + "default_branch": { Type: schema.TypeString, Optional: true, }, @@ -130,6 +133,8 @@ func resourceAwsCodeCommitRepositoryRead(d *schema.ResourceData, meta interface{ d.Set("arn", out.RepositoryMetadata.Arn) d.Set("clone_url_http", out.RepositoryMetadata.CloneUrlHttp) d.Set("clone_url_ssh", out.RepositoryMetadata.CloneUrlSsh) + d.Set("description", out.RepositoryMetadata.RepositoryDescription) + d.Set("repository_name", out.RepositoryMetadata.RepositoryName) if _, ok := d.GetOk("default_branch"); ok { if out.RepositoryMetadata.DefaultBranch != nil { diff --git a/builtin/providers/aws/resource_aws_codecommit_repository_test.go b/builtin/providers/aws/resource_aws_codecommit_repository_test.go index e14882660..6bc3dfab8 100644 --- a/builtin/providers/aws/resource_aws_codecommit_repository_test.go +++ b/builtin/providers/aws/resource_aws_codecommit_repository_test.go @@ -19,7 +19,7 @@ func TestAccAWSCodeCommitRepository_basic(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccCodeCommitRepository_basic(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), @@ -36,7 +36,7 @@ func TestAccAWSCodeCommitRepository_withChanges(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccCodeCommitRepository_basic(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), @@ -44,7 +44,7 @@ func TestAccAWSCodeCommitRepository_withChanges(t *testing.T) { "aws_codecommit_repository.test", "description", "This is a test description"), ), }, - resource.TestStep{ + { Config: testAccCodeCommitRepository_withChanges(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), @@ -63,7 +63,7 @@ func TestAccAWSCodeCommitRepository_create_default_branch(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccCodeCommitRepository_with_default_branch(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), @@ -82,7 +82,7 @@ func TestAccAWSCodeCommitRepository_create_and_update_default_branch(t *testing. Providers: testAccProviders, CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccCodeCommitRepository_basic(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), @@ -90,7 +90,7 @@ func TestAccAWSCodeCommitRepository_create_and_update_default_branch(t *testing. "aws_codecommit_repository.test", "default_branch"), ), }, - resource.TestStep{ + { Config: testAccCodeCommitRepository_with_default_branch(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), From 53fe7c22932df11cbc0d55ce9b41ff3130a90275 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Wed, 12 Apr 2017 10:48:53 -0400 Subject: [PATCH 077/342] provider/template: Fix panic in cloudinit config Fixes an uncaught panic during an interface cast in the `template_cloudinit_config` data source. 
Fixes: #13572 ``` $ make test TEST=./builtin/providers/template TESTARGS="-v -run=TestRender_handlePanic" ==> Checking that code complies with gofmt requirements... ==> Checking AWS provider for unchecked errors... ==> NOTE: at this time we only look for uncheck errors in the AWS package go generate $(go list ./... | grep -v /terraform/vendor/) 2017/04/12 10:46:33 Generated command/internal_plugin_list.go go test -i ./builtin/providers/template || exit 1 echo ./builtin/providers/template | \ xargs -t -n4 go test -v -run=TestRender_handlePanic -timeout=60s -parallel=4 go test -v -run=TestRender_handlePanic -timeout=60s -parallel=4 ./builtin/providers/template === RUN TestRender_handlePanic --- PASS: TestRender_handlePanic (0.00s) PASS ok github.com/hashicorp/terraform/builtin/providers/template 0.028s ``` --- .../template/datasource_cloudinit_config.go | 21 +++++++------ .../datasource_cloudinit_config_test.go | 31 +++++++++++++------ 2 files changed, 34 insertions(+), 18 deletions(-) diff --git a/builtin/providers/template/datasource_cloudinit_config.go b/builtin/providers/template/datasource_cloudinit_config.go index edbe0d13e..9fee9fb49 100644 --- a/builtin/providers/template/datasource_cloudinit_config.go +++ b/builtin/providers/template/datasource_cloudinit_config.go @@ -19,41 +19,41 @@ func dataSourceCloudinitConfig() *schema.Resource { Read: dataSourceCloudinitConfigRead, Schema: map[string]*schema.Schema{ - "part": &schema.Schema{ + "part": { Type: schema.TypeList, Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "content_type": &schema.Schema{ + "content_type": { Type: schema.TypeString, Optional: true, }, - "content": &schema.Schema{ + "content": { Type: schema.TypeString, Required: true, }, - "filename": &schema.Schema{ + "filename": { Type: schema.TypeString, Optional: true, }, - "merge_type": &schema.Schema{ + "merge_type": { Type: schema.TypeString, Optional: true, }, }, }, }, - "gzip": &schema.Schema{ + "gzip": { Type: schema.TypeBool, Optional: true, Default: true, }, - "base64_encode": &schema.Schema{ + "base64_encode": { Type: schema.TypeBool, Optional: true, Default: true, }, - "rendered": &schema.Schema{ + "rendered": { Type: schema.TypeString, Computed: true, Description: "rendered cloudinit configuration", @@ -84,7 +84,10 @@ func renderCloudinitConfig(d *schema.ResourceData) (string, error) { cloudInitParts := make(cloudInitParts, len(partsValue.([]interface{}))) for i, v := range partsValue.([]interface{}) { - p := v.(map[string]interface{}) + p, castOk := v.(map[string]interface{}) + if !castOk { + return "", fmt.Errorf("Unable to parse parts in cloudinit resource declaration") + } part := cloudInitPart{} if p, ok := p["content_type"]; ok { diff --git a/builtin/providers/template/datasource_cloudinit_config_test.go b/builtin/providers/template/datasource_cloudinit_config_test.go index e3e7225db..f40fc8392 100644 --- a/builtin/providers/template/datasource_cloudinit_config_test.go +++ b/builtin/providers/template/datasource_cloudinit_config_test.go @@ -3,6 +3,8 @@ package template import ( "testing" + "regexp" + r "github.com/hashicorp/terraform/helper/resource" ) @@ -58,7 +60,7 @@ func TestRender(t *testing.T) { r.UnitTest(t, r.TestCase{ Providers: testProviders, Steps: []r.TestStep{ - r.TestStep{ + { Config: tt.ResourceBlock, Check: r.ComposeTestCheckFunc( r.TestCheckResourceAttr("data.template_cloudinit_config.foo", "rendered", tt.Expected), @@ -69,12 +71,23 @@ func TestRender(t *testing.T) { } } -var testCloudInitConfig_basic = ` -data 
"template_cloudinit_config" "config" { - part { - content_type = "text/x-shellscript" - content = "baz" - } -}` +// From GH-13572, Correctly handle panic on a misconfigured cloudinit part +func TestRender_handlePanic(t *testing.T) { + r.UnitTest(t, r.TestCase{ + Providers: testProviders, + Steps: []r.TestStep{ + { + Config: testCloudInitConfig_misconfiguredParts, + ExpectError: regexp.MustCompile("Unable to parse parts in cloudinit resource declaration"), + }, + }, + }) +} -var testCloudInitConfig_basic_expected = `Content-Type: multipart/mixed; boundary=\"MIMEBOUNDARY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDARY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDARY--\r\n` +var testCloudInitConfig_misconfiguredParts = ` +data "template_cloudinit_config" "foo" { + part { + content = "" + } +} +` From 82a7d4b4a5e7cfbaea5076502b5ed9f0e00f2211 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Wed, 12 Apr 2017 10:49:52 -0400 Subject: [PATCH 078/342] cleanup imports --- builtin/providers/template/datasource_cloudinit_config_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/builtin/providers/template/datasource_cloudinit_config_test.go b/builtin/providers/template/datasource_cloudinit_config_test.go index f40fc8392..80f37348f 100644 --- a/builtin/providers/template/datasource_cloudinit_config_test.go +++ b/builtin/providers/template/datasource_cloudinit_config_test.go @@ -1,9 +1,8 @@ package template import ( - "testing" - "regexp" + "testing" r "github.com/hashicorp/terraform/helper/resource" ) From 414613787c0c1002765f133a83fdc57d17a570d2 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Wed, 12 Apr 2017 10:54:23 -0400 Subject: [PATCH 079/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa8ba49aa..7f87abd27 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -86,6 +86,7 @@ BUG FIXES: * provider/aws: Update ElasticTranscoderPreset to have default for MaxFrameRate [GH-13422] * provider/aws: Fix aws_ami_launch_permission refresh when AMI disappears [GH-13469] * provider/aws: Add support for updating SSM documents [GH-13491] + * provider/aws: Fix panic on nil route configs [GH-13548] * provider/azurerm: Network Security Group - ignoring protocol casing at Import time [GH-13153] * provider/azurerm: Fix crash when importing Local Network Gateways [GH-13261] * provider/azurerm: Defaulting the value of `duplicate_detection_history_time_window` for `azurerm_servicebus_topic` [GH-13223] From 4606090c32dd69da5b7bcd5be5b1165f96344f3e Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 12 Apr 2017 16:13:33 +0100 Subject: [PATCH 080/342] provider/aws: Add support for api_gateway_method_settings (#13542) --- builtin/providers/aws/provider.go | 1 + ...esource_aws_api_gateway_method_settings.go | 248 ++++++++++++++++ ...ce_aws_api_gateway_method_settings_test.go | 265 ++++++++++++++++++ .../api_gateway_method_settings.html.markdown | 93 ++++++ website/source/layouts/aws.erb | 3 + 5 files changed, 610 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_api_gateway_method_settings.go create mode 100644 builtin/providers/aws/resource_aws_api_gateway_method_settings_test.go create mode 100644 website/source/docs/providers/aws/r/api_gateway_method_settings.html.markdown diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 9a2f7e5ff..d086dedb3 100644 --- a/builtin/providers/aws/provider.go +++ 
b/builtin/providers/aws/provider.go @@ -218,6 +218,7 @@ func Provider() terraform.ResourceProvider { "aws_api_gateway_integration_response": resourceAwsApiGatewayIntegrationResponse(), "aws_api_gateway_method": resourceAwsApiGatewayMethod(), "aws_api_gateway_method_response": resourceAwsApiGatewayMethodResponse(), + "aws_api_gateway_method_settings": resourceAwsApiGatewayMethodSettings(), "aws_api_gateway_model": resourceAwsApiGatewayModel(), "aws_api_gateway_resource": resourceAwsApiGatewayResource(), "aws_api_gateway_rest_api": resourceAwsApiGatewayRestApi(), diff --git a/builtin/providers/aws/resource_aws_api_gateway_method_settings.go b/builtin/providers/aws/resource_aws_api_gateway_method_settings.go new file mode 100644 index 000000000..06d5efd01 --- /dev/null +++ b/builtin/providers/aws/resource_aws_api_gateway_method_settings.go @@ -0,0 +1,248 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayMethodSettings() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayMethodSettingsUpdate, + Read: resourceAwsApiGatewayMethodSettingsRead, + Update: resourceAwsApiGatewayMethodSettingsUpdate, + Delete: resourceAwsApiGatewayMethodSettingsDelete, + + Schema: map[string]*schema.Schema{ + "rest_api_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "stage_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "method_path": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "settings": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metrics_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "logging_level": { + Type: schema.TypeString, + Optional: true, + }, + "data_trace_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "throttling_burst_limit": { + Type: schema.TypeInt, + Optional: true, + }, + "throttling_rate_limit": { + Type: schema.TypeFloat, + Optional: true, + }, + "caching_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "cache_ttl_in_seconds": { + Type: schema.TypeInt, + Optional: true, + }, + "cache_data_encrypted": { + Type: schema.TypeBool, + Optional: true, + }, + "require_authorization_for_cache_control": { + Type: schema.TypeBool, + Optional: true, + }, + "unauthorized_cache_control_header_strategy": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsApiGatewayMethodSettingsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Reading API Gateway Method Settings %s", d.Id()) + input := apigateway.GetStageInput{ + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StageName: aws.String(d.Get("stage_name").(string)), + } + stage, err := conn.GetStage(&input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + log.Printf("[WARN] API Gateway Stage %s not found, removing method settings", d.Id()) + d.SetId("") + return nil + } + return err + } + log.Printf("[DEBUG] Received API Gateway Stage: %s", stage) + + methodPath := d.Get("method_path").(string) + settings, ok := stage.MethodSettings[methodPath] + if !ok { + log.Printf("[WARN] API Gateway Method Settings for %q not found, removing", methodPath) + d.SetId("") + 
return nil
+	}
+
+	d.Set("settings.0.metrics_enabled", settings.MetricsEnabled)
+	d.Set("settings.0.logging_level", settings.LoggingLevel)
+	d.Set("settings.0.data_trace_enabled", settings.DataTraceEnabled)
+	d.Set("settings.0.throttling_burst_limit", settings.ThrottlingBurstLimit)
+	d.Set("settings.0.throttling_rate_limit", settings.ThrottlingRateLimit)
+	d.Set("settings.0.caching_enabled", settings.CachingEnabled)
+	d.Set("settings.0.cache_ttl_in_seconds", settings.CacheTtlInSeconds)
+	d.Set("settings.0.cache_data_encrypted", settings.CacheDataEncrypted)
+	d.Set("settings.0.require_authorization_for_cache_control", settings.RequireAuthorizationForCacheControl)
+	d.Set("settings.0.unauthorized_cache_control_header_strategy", settings.UnauthorizedCacheControlHeaderStrategy)
+
+	return nil
+}
+
+func resourceAwsApiGatewayMethodSettingsUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).apigateway
+
+	methodPath := d.Get("method_path").(string)
+	prefix := fmt.Sprintf("/%s/", methodPath)
+
+	ops := make([]*apigateway.PatchOperation, 0)
+	if d.HasChange("settings.0.metrics_enabled") {
+		ops = append(ops, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String(prefix + "metrics/enabled"),
+			Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.metrics_enabled").(bool))),
+		})
+	}
+	if d.HasChange("settings.0.logging_level") {
+		ops = append(ops, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String(prefix + "logging/loglevel"),
+			Value: aws.String(d.Get("settings.0.logging_level").(string)),
+		})
+	}
+	if d.HasChange("settings.0.data_trace_enabled") {
+		ops = append(ops, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String(prefix + "logging/dataTrace"),
+			Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.data_trace_enabled").(bool))),
+		})
+	}
+
+	if d.HasChange("settings.0.throttling_burst_limit") {
+		ops = append(ops, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String(prefix + "throttling/burstLimit"),
+			Value: aws.String(fmt.Sprintf("%d", d.Get("settings.0.throttling_burst_limit").(int))),
+		})
+	}
+	if d.HasChange("settings.0.throttling_rate_limit") {
+		ops = append(ops, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String(prefix + "throttling/rateLimit"),
+			Value: aws.String(fmt.Sprintf("%f", d.Get("settings.0.throttling_rate_limit").(float64))),
+		})
+	}
+	if d.HasChange("settings.0.caching_enabled") {
+		ops = append(ops, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String(prefix + "caching/enabled"),
+			Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.caching_enabled").(bool))),
+		})
+	}
+	if d.HasChange("settings.0.cache_ttl_in_seconds") {
+		ops = append(ops, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String(prefix + "caching/ttlInSeconds"),
+			Value: aws.String(fmt.Sprintf("%d", d.Get("settings.0.cache_ttl_in_seconds").(int))),
+		})
+	}
+	if d.HasChange("settings.0.cache_data_encrypted") {
+		ops = append(ops, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String(prefix + "caching/dataEncrypted"),
+			Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.cache_data_encrypted").(bool))),
+		})
+	}
+	if d.HasChange("settings.0.require_authorization_for_cache_control") {
+		ops = append(ops, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String(prefix + "caching/requireAuthorizationForCacheControl"),
+			Value: aws.String(fmt.Sprintf("%t",
d.Get("settings.0.require_authorization_for_cache_control").(bool))), + }) + } + if d.HasChange("settings.0.unauthorized_cache_control_header_strategy") { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + "caching/unauthorizedCacheControlHeaderStrategy"), + Value: aws.String(d.Get("settings.0.unauthorized_cache_control_header_strategy").(string)), + }) + } + + restApiId := d.Get("rest_api_id").(string) + stageName := d.Get("stage_name").(string) + input := apigateway.UpdateStageInput{ + RestApiId: aws.String(restApiId), + StageName: aws.String(stageName), + PatchOperations: ops, + } + log.Printf("[DEBUG] Updating API Gateway Stage: %s", input) + _, err := conn.UpdateStage(&input) + if err != nil { + return fmt.Errorf("Updating API Gateway Stage failed: %s", err) + } + + d.SetId(restApiId + "-" + stageName + "-" + methodPath) + + return resourceAwsApiGatewayMethodSettingsRead(d, meta) +} + +func resourceAwsApiGatewayMethodSettingsDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Deleting API Gateway Method Settings: %s", d.Id()) + + input := apigateway.UpdateStageInput{ + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StageName: aws.String(d.Get("stage_name").(string)), + PatchOperations: []*apigateway.PatchOperation{ + { + Op: aws.String("remove"), + Path: aws.String(fmt.Sprintf("/%s", d.Get("method_path").(string))), + }, + }, + } + log.Printf("[DEBUG] Updating API Gateway Stage: %s", input) + _, err := conn.UpdateStage(&input) + if err != nil { + return fmt.Errorf("Updating API Gateway Stage failed: %s", err) + } + + return nil +} diff --git a/builtin/providers/aws/resource_aws_api_gateway_method_settings_test.go b/builtin/providers/aws/resource_aws_api_gateway_method_settings_test.go new file mode 100644 index 000000000..9372a6748 --- /dev/null +++ b/builtin/providers/aws/resource_aws_api_gateway_method_settings_test.go @@ -0,0 +1,265 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSAPIGatewayMethodSettings_basic(t *testing.T) { + var stage apigateway.Stage + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayMethodSettingsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayMethodSettingsConfig(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayMethodSettingsExists("aws_api_gateway_method_settings.test", &stage), + testAccCheckAWSAPIGatewayMethodSettings_metricsEnabled(&stage, "test/GET", true), + testAccCheckAWSAPIGatewayMethodSettings_loggingLevel(&stage, "test/GET", "INFO"), + resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.#", "1"), + resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.0.metrics_enabled", "true"), + resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.0.logging_level", "INFO"), + ), + }, + + { + Config: testAccAWSAPIGatewayMethodSettingsConfigUpdate(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayMethodSettingsExists("aws_api_gateway_method_settings.test", &stage), 
+ testAccCheckAWSAPIGatewayMethodSettings_metricsEnabled(&stage, "test/GET", false), + testAccCheckAWSAPIGatewayMethodSettings_loggingLevel(&stage, "test/GET", "OFF"), + resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.#", "1"), + resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.0.metrics_enabled", "false"), + resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.0.logging_level", "OFF"), + ), + }, + }, + }) +} + +func testAccCheckAWSAPIGatewayMethodSettings_metricsEnabled(conf *apigateway.Stage, path string, expected bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + settings, ok := conf.MethodSettings[path] + if !ok { + return fmt.Errorf("Expected to find method settings for %q", path) + } + + if expected && *settings.MetricsEnabled != expected { + return fmt.Errorf("Expected metrics to be enabled, got %t", *settings.MetricsEnabled) + } + if !expected && *settings.MetricsEnabled != expected { + return fmt.Errorf("Expected metrics to be disabled, got %t", *settings.MetricsEnabled) + } + + return nil + } +} + +func testAccCheckAWSAPIGatewayMethodSettings_loggingLevel(conf *apigateway.Stage, path string, expectedLevel string) resource.TestCheckFunc { + return func(s *terraform.State) error { + settings, ok := conf.MethodSettings[path] + if !ok { + return fmt.Errorf("Expected to find method settings for %q", path) + } + + if *settings.LoggingLevel != expectedLevel { + return fmt.Errorf("Expected logging level to match %q, got %q", expectedLevel, *settings.LoggingLevel) + } + + return nil + } +} + +func testAccCheckAWSAPIGatewayMethodSettingsExists(n string, res *apigateway.Stage) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No API Gateway Stage ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).apigateway + + req := &apigateway.GetStageInput{ + StageName: aws.String(s.RootModule().Resources["aws_api_gateway_deployment.test"].Primary.Attributes["stage_name"]), + RestApiId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID), + } + out, err := conn.GetStage(req) + if err != nil { + return err + } + + *res = *out + + return nil + } +} + +func testAccCheckAWSAPIGatewayMethodSettingsDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).apigateway + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_api_gateway_method_settings" { + continue + } + + req := &apigateway.GetStageInput{ + StageName: aws.String(s.RootModule().Resources["aws_api_gateway_deployment.test"].Primary.Attributes["stage_name"]), + RestApiId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID), + } + out, err := conn.GetStage(req) + if err == nil { + return fmt.Errorf("API Gateway Stage still exists: %s", out) + } + + awsErr, ok := err.(awserr.Error) + if !ok { + return err + } + if awsErr.Code() != "NotFoundException" { + return err + } + + return nil + } + + return nil +} + +func testAccAWSAPIGatewayMethodSettingsConfig(rInt int) string { + return fmt.Sprintf(` +resource "aws_api_gateway_rest_api" "test" { + name = "tf-acc-test-apig-method-%d" +} + +resource "aws_api_gateway_resource" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" + path_part = "test" +} 
+ +resource "aws_api_gateway_method" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "GET" + authorization = "NONE" + + request_models = { + "application/json" = "Error" + } + + request_parameters = { + "method.request.header.Content-Type" = false, + "method.request.querystring.page" = true + } +} + +resource "aws_api_gateway_integration" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "${aws_api_gateway_method.test.http_method}" + type = "MOCK" + + request_templates { + "application/xml" = <> aws_api_gateway_method_response + > + aws_api_gateway_method_settings + > aws_api_gateway_model From 519a0fe97f7db40405510b0c0ff055ee52b1a40b Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 12 Apr 2017 16:15:12 +0100 Subject: [PATCH 081/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f87abd27..524da767b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ FEATURES: + * **New Resource:** `aws_api_gateway_method_settings` [GH-13542] * **New Resource:** `aws_api_gateway_stage` [GH-13540] * **New Resource:** `aws_iam_openid_connect_provider` [GH-13456] * **New Resource:** `aws_lightsail_static_ip` [GH-13175] From e57790e63635f053647b3d6c663e1df88bb7a7f7 Mon Sep 17 00:00:00 2001 From: Jenny Duckett Date: Wed, 12 Apr 2017 16:43:43 +0100 Subject: [PATCH 082/342] website: Add sidebar links to GitHub webhook resource pages These resources and their documentation were added in https://github.com/hashicorp/terraform/pull/12924. Add sidebar links to the docs pages to make them easier to find. --- website/source/layouts/github.erb | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/website/source/layouts/github.erb b/website/source/layouts/github.erb index 72c9d8f24..05d73bd2d 100644 --- a/website/source/layouts/github.erb +++ b/website/source/layouts/github.erb @@ -16,12 +16,18 @@ > github_membership + > + github_organization_webhook + > github_repository > github_repository_collaborator + > + github_repository_webhook + > github_team From 9c431aee1b3a05fe228affc7e80806e21b54b634 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 12 Apr 2017 13:30:49 -0400 Subject: [PATCH 083/342] only list environments when the keyName matches Prevent extra keys in the s3 envPrefix path from showing up as listed environments. Better handle keys containing slashes Add tests for unexpected keys in s3. --- backend/remote-state/s3/backend_state.go | 24 +++-- backend/remote-state/s3/backend_test.go | 132 ++++++++++++++++++++++- 2 files changed, 148 insertions(+), 8 deletions(-) diff --git a/backend/remote-state/s3/backend_state.go b/backend/remote-state/s3/backend_state.go index 2d745156e..f7a4d337d 100644 --- a/backend/remote-state/s3/backend_state.go +++ b/backend/remote-state/s3/backend_state.go @@ -1,6 +1,7 @@ package s3 import ( + "errors" "fmt" "sort" "strings" @@ -30,29 +31,34 @@ func (b *Backend) States() ([]string, error) { return nil, err } - var envs []string + envs := []string{backend.DefaultStateName} for _, obj := range resp.Contents { - env := keyEnv(*obj.Key) + env := b.keyEnv(*obj.Key) if env != "" { envs = append(envs, env) } } - sort.Strings(envs) - envs = append([]string{backend.DefaultStateName}, envs...) 
+ sort.Strings(envs[1:]) return envs, nil } // extract the env name from the S3 key -func keyEnv(key string) string { - parts := strings.Split(key, "/") +func (b *Backend) keyEnv(key string) string { + // we have 3 parts, the prefix, the env name, and the key name + parts := strings.SplitN(key, "/", 3) if len(parts) < 3 { // no env here return "" } + // shouldn't happen since we listed by prefix if parts[0] != keyEnvPrefix { - // not our key, so ignore + return "" + } + + // not our key, so don't include it in our listing + if parts[2] != b.keyName { return "" } @@ -78,6 +84,10 @@ func (b *Backend) DeleteState(name string) error { } func (b *Backend) State(name string) (state.State, error) { + if name == "" { + return nil, errors.New("missing state name") + } + client := &RemoteClient{ s3Client: b.s3Client, dynClient: b.dynClient, diff --git a/backend/remote-state/s3/backend_test.go b/backend/remote-state/s3/backend_test.go index 44987683f..d90c76a6c 100644 --- a/backend/remote-state/s3/backend_test.go +++ b/backend/remote-state/s3/backend_test.go @@ -3,6 +3,7 @@ package s3 import ( "fmt" "os" + "reflect" "testing" "time" @@ -10,6 +11,8 @@ import ( "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" ) // verify that we are doing ACC tests or the S3 tests specifically @@ -84,7 +87,7 @@ func TestBackendLocked(t *testing.T) { testACC(t) bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) - keyName := "testState" + keyName := "test/state" b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{ "bucket": bucketName, @@ -108,6 +111,133 @@ func TestBackendLocked(t *testing.T) { backend.TestBackend(t, b1, b2) } +// add some extra junk in S3 to try and confuse the env listing. +func TestBackendExtraPaths(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix()) + keyName := "test/state/tfstate" + + b := backend.TestBackendConfig(t, New(), map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + }).(*Backend) + + createS3Bucket(t, b.s3Client, bucketName) + defer deleteS3Bucket(t, b.s3Client, bucketName) + + // put multiple states in old env paths. 
+ s1 := terraform.NewState() + s2 := terraform.NewState() + + // RemoteClient to Put things in various paths + client := &RemoteClient{ + s3Client: b.s3Client, + dynClient: b.dynClient, + bucketName: b.bucketName, + path: b.path("s1"), + serverSideEncryption: b.serverSideEncryption, + acl: b.acl, + kmsKeyID: b.kmsKeyID, + lockTable: b.lockTable, + } + + stateMgr := &remote.State{Client: client} + stateMgr.WriteState(s1) + if err := stateMgr.PersistState(); err != nil { + t.Fatal(err) + } + + client.path = b.path("s2") + stateMgr.WriteState(s2) + if err := stateMgr.PersistState(); err != nil { + t.Fatal(err) + } + + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } + + // put a state in an env directory name + client.path = keyEnvPrefix + "/error" + stateMgr.WriteState(terraform.NewState()) + if err := stateMgr.PersistState(); err != nil { + t.Fatal(err) + } + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } + + // add state with the wrong key for an existing env + client.path = keyEnvPrefix + "/s2/notTestState" + stateMgr.WriteState(terraform.NewState()) + if err := stateMgr.PersistState(); err != nil { + t.Fatal(err) + } + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } + + // remove the state with extra subkey + if err := b.DeleteState("s2"); err != nil { + t.Fatal(err) + } + + if err := checkStateList(b, []string{"default", "s1"}); err != nil { + t.Fatal(err) + } + + // fetch that state again, which should produce a new lineage + s2Mgr, err := b.State("s2") + if err != nil { + t.Fatal(err) + } + if err := s2Mgr.RefreshState(); err != nil { + t.Fatal(err) + } + + if s2Mgr.State().Lineage == s2.Lineage { + t.Fatal("state s2 was not deleted") + } + s2 = s2Mgr.State() + + // add a state with a key that matches an existing environment dir name + client.path = keyEnvPrefix + "/s2/" + stateMgr.WriteState(terraform.NewState()) + if err := stateMgr.PersistState(); err != nil { + t.Fatal(err) + } + + // make sure s2 is OK + s2Mgr, err = b.State("s2") + if err != nil { + t.Fatal(err) + } + if err := s2Mgr.RefreshState(); err != nil { + t.Fatal(err) + } + + if s2Mgr.State().Lineage != s2.Lineage { + t.Fatal("we got the wrong state for s2") + } + + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } +} + +func checkStateList(b backend.Backend, expected []string) error { + states, err := b.States() + if err != nil { + return err + } + + if !reflect.DeepEqual(states, expected) { + return fmt.Errorf("incorrect states listed: %q", states) + } + return nil +} + func createS3Bucket(t *testing.T, s3Client *s3.S3, bucketName string) { createBucketReq := &s3.CreateBucketInput{ Bucket: &bucketName, From d1b4df42ed0d046ddbf2bc60aded820c2a936a7b Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 12 Apr 2017 13:42:23 -0400 Subject: [PATCH 084/342] missing PersistState in env new --- command/env_new.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/command/env_new.go b/command/env_new.go index 84b21bf8b..04c52a6d2 100644 --- a/command/env_new.go +++ b/command/env_new.go @@ -122,6 +122,11 @@ func (c *EnvNewCommand) Run(args []string) int { c.Ui.Error(err.Error()) return 1 } + err = sMgr.PersistState() + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } return 0 } From 5db1216a18acfc9a9ac222cb2becd6aed0537f67 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 12 Apr 2017 14:31:20 -0400 Subject: [PATCH 085/342] update 
changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 524da767b..8fe9189df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ IMPROVEMENTS: * core: add `-lock-timeout` option, which will block and retry locks for the given duration [GH-13262] * core: new `chomp` interpolation function which returns the given string with any trailing newline characters removed [GH-13419] * backend/remote-state: Add support for assume role extensions to s3 backend [GH-13236] + * backend/remote-state: Filter extra entries from s3 environment listings [GH-13596] * config: New interpolation functions `basename` and `dirname`, for file path manipulation [GH-13080] * helper/resource: Allow unknown "pending" states [GH-13099] * command/hook_ui: Increase max length of state IDs from 20 to 80 [GH-13317] From 00e8986357e2fdbb042ab0ab3796423a56349bca Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 12 Apr 2017 20:49:04 +0200 Subject: [PATCH 086/342] provider/dnsimple: Handle 404 on DNSimple records (#13131) When a record was manually deleted from the console, we got an error saying 404 Record Not Found //cc @weppos This PR now handles the usecase: ``` % make testacc TEST=./builtin/providers/dnsimple ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) 2017/03/28 21:48:19 Generated command/internal_plugin_list.go TF_ACC=1 go test ./builtin/providers/dnsimple -v -timeout 120m === RUN TestProvider --- PASS: TestProvider (0.00s) === RUN TestProvider_impl --- PASS: TestProvider_impl (0.00s) === RUN TestAccDNSimpleRecord_Basic --- PASS: TestAccDNSimpleRecord_Basic (1.81s) === RUN TestAccDNSimpleRecord_CreateMxWithPriority --- PASS: TestAccDNSimpleRecord_CreateMxWithPriority (1.32s) === RUN TestAccDNSimpleRecord_Updated --- PASS: TestAccDNSimpleRecord_Updated (4.46s) === RUN TestAccDNSimpleRecord_disappears --- PASS: TestAccDNSimpleRecord_disappears (1.20s) === RUN TestAccDNSimpleRecord_UpdatedMx --- PASS: TestAccDNSimpleRecord_UpdatedMx (2.91s) PASS ok github.com/hashicorp/terraform/builtin/providers/dnsimple 11.723s ``` --- .../dnsimple/resource_dnsimple_record.go | 6 +++ .../dnsimple/resource_dnsimple_record_test.go | 48 ++++++++++++++++--- 2 files changed, 48 insertions(+), 6 deletions(-) diff --git a/builtin/providers/dnsimple/resource_dnsimple_record.go b/builtin/providers/dnsimple/resource_dnsimple_record.go index a5e39472c..3f17977a5 100644 --- a/builtin/providers/dnsimple/resource_dnsimple_record.go +++ b/builtin/providers/dnsimple/resource_dnsimple_record.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "strconv" + "strings" "github.com/dnsimple/dnsimple-go/dnsimple" "github.com/hashicorp/terraform/helper/schema" @@ -104,6 +105,11 @@ func resourceDNSimpleRecordRead(d *schema.ResourceData, meta interface{}) error resp, err := provider.client.Zones.GetRecord(provider.config.Account, d.Get("domain").(string), recordID) if err != nil { + if err != nil && strings.Contains(err.Error(), "404") { + log.Printf("DNSimple Record Not Found - Refreshing from State") + d.SetId("") + return nil + } return fmt.Errorf("Couldn't find DNSimple Record: %s", err) } diff --git a/builtin/providers/dnsimple/resource_dnsimple_record_test.go b/builtin/providers/dnsimple/resource_dnsimple_record_test.go index e7e5e876f..8b19697bd 100644 --- a/builtin/providers/dnsimple/resource_dnsimple_record_test.go +++ b/builtin/providers/dnsimple/resource_dnsimple_record_test.go @@ -20,7 +20,7 @@ func 
TestAccDNSimpleRecord_Basic(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckDNSimpleRecordDestroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_basic, domain), Check: resource.ComposeTestCheckFunc( testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), @@ -46,7 +46,7 @@ func TestAccDNSimpleRecord_CreateMxWithPriority(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckDNSimpleRecordDestroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_mx, domain), Check: resource.ComposeTestCheckFunc( testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), @@ -73,7 +73,7 @@ func TestAccDNSimpleRecord_Updated(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckDNSimpleRecordDestroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_basic, domain), Check: resource.ComposeTestCheckFunc( testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), @@ -86,7 +86,7 @@ func TestAccDNSimpleRecord_Updated(t *testing.T) { "dnsimple_record.foobar", "value", "192.168.0.10"), ), }, - resource.TestStep{ + { Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_new_value, domain), Check: resource.ComposeTestCheckFunc( testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), @@ -103,6 +103,27 @@ func TestAccDNSimpleRecord_Updated(t *testing.T) { }) } +func TestAccDNSimpleRecord_disappears(t *testing.T) { + var record dnsimple.ZoneRecord + domain := os.Getenv("DNSIMPLE_DOMAIN") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDNSimpleRecordDestroy, + Steps: []resource.TestStep{ + { + Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_basic, domain), + Check: resource.ComposeTestCheckFunc( + testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), + testAccCheckDNSimpleRecordDisappears(&record, domain), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccDNSimpleRecord_UpdatedMx(t *testing.T) { var record dnsimple.ZoneRecord domain := os.Getenv("DNSIMPLE_DOMAIN") @@ -112,7 +133,7 @@ func TestAccDNSimpleRecord_UpdatedMx(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckDNSimpleRecordDestroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_mx, domain), Check: resource.ComposeTestCheckFunc( testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), @@ -126,7 +147,7 @@ func TestAccDNSimpleRecord_UpdatedMx(t *testing.T) { "dnsimple_record.foobar", "priority", "5"), ), }, - resource.TestStep{ + { Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_mx_new_value, domain), Check: resource.ComposeTestCheckFunc( testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), @@ -144,6 +165,21 @@ func TestAccDNSimpleRecord_UpdatedMx(t *testing.T) { }) } +func testAccCheckDNSimpleRecordDisappears(record *dnsimple.ZoneRecord, domain string) resource.TestCheckFunc { + return func(s *terraform.State) error { + + provider := testAccProvider.Meta().(*Client) + + _, err := provider.client.Zones.DeleteRecord(provider.config.Account, domain, record.ID) + if err != nil { + return err + } + + return nil + } + +} + func testAccCheckDNSimpleRecordDestroy(s *terraform.State) error { provider := testAccProvider.Meta().(*Client) From 
9c20f9ccc72791ee468aac8724634441970a5371 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Wed, 12 Apr 2017 14:49:09 -0400 Subject: [PATCH 087/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8fe9189df..df2c31985 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -99,6 +99,7 @@ BUG FIXES: * provider/openstack: Refresh volume_attachment from state if NotFound [GH-13342] * provider/openstack: Add SOFT_DELETED to delete status [GH-13444] * provider/profitbricks: Changed output type of ips variable of ip_block ProfitBricks resource [GH-13290] + * provider/template: Fix panic in cloudinit config [GH-13581] ## 0.9.2 (March 28, 2017) From a1608cf659bd2eb425cbad55edda7d35b28c35d5 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 12 Apr 2017 20:50:13 +0200 Subject: [PATCH 088/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index df2c31985..6ec0fe085 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -94,6 +94,7 @@ BUG FIXES: * provider/azurerm: Defaulting the value of `duplicate_detection_history_time_window` for `azurerm_servicebus_topic` [GH-13223] * provider/azurerm: Event Hubs making the Location field idempotent [GH-13570] * provider/bitbucket: Fixed issue where provider would fail with an "EOF" error on some operations [GH-13390] + * provider/dnsimple: Handle 404 on DNSimple records [GH-13131] * provider/kubernetes: Use PATCH to update namespace [GH-13114] * provider/ns1: No splitting answer on SPF records. [GH-13260] * provider/openstack: Refresh volume_attachment from state if NotFound [GH-13342] From 491cc0b725279ecda758280a3ba09c5d95738272 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Wed, 12 Apr 2017 14:52:33 -0400 Subject: [PATCH 089/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6ec0fe085..f4a3268b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ IMPROVEMENTS: * provider/aws: Add support for evaluate_low_sample_count_percentiles to cloudwatch_metric_alarm [GH-13371] * provider/aws: Add `name_prefix` to `aws_alb_target_group` [GH-13442] * provider/aws: Add support for EMR clusters to aws_appautoscaling_target [GH-13368] + * provider/aws: Add import capabilities to codecommit_repository [GH-13577] * provider/bitbucket: Improved error handling [GH-13390] * provider/cloudstack: Do not force a new resource when updating `cloudstack_loadbalancer_rule` members [GH-11786] * provider/fastly: Add support for Sumologic logging [GH-12541] From 9ef9501e654a23faf4b4787db2f17748168db53c Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 12 Apr 2017 14:19:38 -0500 Subject: [PATCH 090/342] provider/aws: Fix EMR Bootstrap Action Ordering (#13580) * provider/aws: Add failing test for EMR Bootstrap Actions * aws_emr_cluster: Fix bootstrap action parameter ordering * provider/aws: Fix EMR Bootstrap arguments * provider/aws: Args needs to be ForceNew, because we can't update them --- .../providers/aws/resource_aws_emr_cluster.go | 32 +- .../aws/resource_aws_emr_cluster_test.go | 407 +++++++++++++++++- helper/acctest/random.go | 7 + 3 files changed, 430 insertions(+), 16 deletions(-) diff --git a/builtin/providers/aws/resource_aws_emr_cluster.go b/builtin/providers/aws/resource_aws_emr_cluster.go index 9217d0ed7..62b138505 100644 --- a/builtin/providers/aws/resource_aws_emr_cluster.go +++ b/builtin/providers/aws/resource_aws_emr_cluster.go @@ -138,10 +138,10 @@ func 
resourceAwsEMRCluster() *schema.Resource { Required: true, }, "args": { - Type: schema.TypeSet, + Type: schema.TypeList, Optional: true, + ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, }, }, @@ -381,6 +381,18 @@ func resourceAwsEMRClusterRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("ec2_attributes", flattenEc2Attributes(cluster.Ec2InstanceAttributes)); err != nil { log.Printf("[ERR] Error setting EMR Ec2 Attributes: %s", err) } + + respBootstraps, err := emrconn.ListBootstrapActions(&emr.ListBootstrapActionsInput{ + ClusterId: cluster.Id, + }) + if err != nil { + log.Printf("[WARN] Error listing bootstrap actions: %s", err) + } + + if err := d.Set("bootstrap_action", flattenBootstrapArguments(respBootstraps.BootstrapActions)); err != nil { + log.Printf("[WARN] Error setting Bootstrap Actions: %s", err) + } + return nil } @@ -589,6 +601,20 @@ func flattenEc2Attributes(ia *emr.Ec2InstanceAttributes) []map[string]interface{ return result } +func flattenBootstrapArguments(actions []*emr.Command) []map[string]interface{} { + result := make([]map[string]interface{}, 0) + + for _, b := range actions { + attrs := make(map[string]interface{}) + attrs["name"] = *b.Name + attrs["path"] = *b.ScriptPath + attrs["args"] = flattenStringList(b.Args) + result = append(result, attrs) + } + + return result +} + func loadGroups(d *schema.ResourceData, meta interface{}) ([]*emr.InstanceGroup, error) { emrconn := meta.(*AWSClient).emrconn reqGrps := &emr.ListInstanceGroupsInput{ @@ -699,7 +725,7 @@ func expandBootstrapActions(bootstrapActions []interface{}) []*emr.BootstrapActi actionAttributes := raw.(map[string]interface{}) actionName := actionAttributes["name"].(string) actionPath := actionAttributes["path"].(string) - actionArgs := actionAttributes["args"].(*schema.Set).List() + actionArgs := actionAttributes["args"].([]interface{}) action := &emr.BootstrapActionConfig{ Name: aws.String(actionName), diff --git a/builtin/providers/aws/resource_aws_emr_cluster_test.go b/builtin/providers/aws/resource_aws_emr_cluster_test.go index 2760e8e76..688c86f3f 100644 --- a/builtin/providers/aws/resource_aws_emr_cluster_test.go +++ b/builtin/providers/aws/resource_aws_emr_cluster_test.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "reflect" "testing" "github.com/aws/aws-sdk-go/aws" @@ -14,7 +15,7 @@ import ( ) func TestAccAWSEMRCluster_basic(t *testing.T) { - var jobFlow emr.RunJobFlowOutput + var cluster emr.Cluster r := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -23,14 +24,51 @@ func TestAccAWSEMRCluster_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSEmrClusterConfig(r), - Check: testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &jobFlow), + Check: testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), + }, + }, + }) +} + +func TestAccAWSEMRCluster_bootstrap_ordering(t *testing.T) { + var cluster emr.Cluster + rName := acctest.RandomWithPrefix("tf-emr-bootstrap") + argsInts := []string{ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + } + + argsStrings := []string{ + "instance.isMaster=true", + "echo running on master node", + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSEmrDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSEmrClusterConfig_bootstrap(rName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckAWSEmrClusterExists("aws_emr_cluster.test", &cluster), + testAccCheck_bootstrap_order(&cluster, argsInts, argsStrings), + ), }, }, }) } func TestAccAWSEMRCluster_terminationProtected(t *testing.T) { - var jobFlow emr.RunJobFlowOutput + var cluster emr.Cluster r := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -40,7 +78,7 @@ func TestAccAWSEMRCluster_terminationProtected(t *testing.T) { { Config: testAccAWSEmrClusterConfig(r), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &jobFlow), + testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), resource.TestCheckResourceAttr( "aws_emr_cluster.tf-test-cluster", "termination_protection", "false"), ), @@ -48,7 +86,7 @@ func TestAccAWSEMRCluster_terminationProtected(t *testing.T) { { Config: testAccAWSEmrClusterConfigTerminationPolicyUpdated(r), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &jobFlow), + testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), resource.TestCheckResourceAttr( "aws_emr_cluster.tf-test-cluster", "termination_protection", "true"), ), @@ -57,7 +95,7 @@ func TestAccAWSEMRCluster_terminationProtected(t *testing.T) { //Need to turn off termination_protection to allow the job to be deleted Config: testAccAWSEmrClusterConfig(r), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &jobFlow), + testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), ), }, }, @@ -65,7 +103,7 @@ func TestAccAWSEMRCluster_terminationProtected(t *testing.T) { } func TestAccAWSEMRCluster_visibleToAllUsers(t *testing.T) { - var jobFlow emr.RunJobFlowOutput + var cluster emr.Cluster r := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -75,7 +113,7 @@ func TestAccAWSEMRCluster_visibleToAllUsers(t *testing.T) { { Config: testAccAWSEmrClusterConfig(r), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &jobFlow), + testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), resource.TestCheckResourceAttr( "aws_emr_cluster.tf-test-cluster", "visible_to_all_users", "true"), ), @@ -83,7 +121,7 @@ func TestAccAWSEMRCluster_visibleToAllUsers(t *testing.T) { { Config: testAccAWSEmrClusterConfigVisibleToAllUsersUpdated(r), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &jobFlow), + testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), resource.TestCheckResourceAttr( "aws_emr_cluster.tf-test-cluster", "visible_to_all_users", "false"), ), @@ -93,7 +131,7 @@ func TestAccAWSEMRCluster_visibleToAllUsers(t *testing.T) { } func TestAccAWSEMRCluster_tags(t *testing.T) { - var jobFlow emr.RunJobFlowOutput + var cluster emr.Cluster r := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -103,7 +141,7 @@ func TestAccAWSEMRCluster_tags(t *testing.T) { { Config: testAccAWSEmrClusterConfig(r), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &jobFlow), + testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), resource.TestCheckResourceAttr("aws_emr_cluster.tf-test-cluster", "tags.%", "4"), 
resource.TestCheckResourceAttr(
 					"aws_emr_cluster.tf-test-cluster", "tags.role", "rolename"),
@@ -117,7 +155,7 @@
 		{
 			Config: testAccAWSEmrClusterConfigUpdatedTags(r),
 			Check: resource.ComposeTestCheckFunc(
-				testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &jobFlow),
+				testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster),
 				resource.TestCheckResourceAttr("aws_emr_cluster.tf-test-cluster", "tags.%", "3"),
 				resource.TestCheckResourceAttr(
 					"aws_emr_cluster.tf-test-cluster", "tags.dns_zone", "new_zone"),
@@ -131,6 +169,45 @@
 	})
 }

+func testAccCheck_bootstrap_order(cluster *emr.Cluster, argsInts, argsStrings []string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+
+		emrconn := testAccProvider.Meta().(*AWSClient).emrconn
+		req := emr.ListBootstrapActionsInput{
+			ClusterId: cluster.Id,
+		}
+
+		resp, err := emrconn.ListBootstrapActions(&req)
+		if err != nil {
+			return fmt.Errorf("[ERR] Error listing bootstrap actions in test: %s", err)
+		}
+
+		// make sure we actually checked something
+		var ran bool
+		for _, ba := range resp.BootstrapActions {
+			// assume name matches the config
+			rArgs := aws.StringValueSlice(ba.Args)
+			if *ba.Name == "test" {
+				ran = true
+				if !reflect.DeepEqual(argsInts, rArgs) {
+					return fmt.Errorf("Error matching Bootstrap args:\n\texpected: %#v\n\tgot: %#v", argsInts, rArgs)
+				}
+			} else if *ba.Name == "runif" {
+				ran = true
+				if !reflect.DeepEqual(argsStrings, rArgs) {
+					return fmt.Errorf("Error matching Bootstrap args:\n\texpected: %#v\n\tgot: %#v", argsStrings, rArgs)
+				}
+			}
+		}
+
+		if !ran {
+			return fmt.Errorf("Expected to compare bootstrap actions, but no checks were run")
+		}
+
+		return nil
+	}
+}
+
 func testAccCheckAWSEmrDestroy(s *terraform.State) error {
 	conn := testAccProvider.Meta().(*AWSClient).emrconn
@@ -163,7 +240,7 @@ func testAccCheckAWSEmrDestroy(s *terraform.State) error {
 	return nil
 }

-func testAccCheckAWSEmrClusterExists(n string, v *emr.RunJobFlowOutput) resource.TestCheckFunc {
+func testAccCheckAWSEmrClusterExists(n string, v *emr.Cluster) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[n]
 		if !ok {
@@ -185,6 +262,8 @@ func testAccCheckAWSEmrClusterExists(n string, v *emr.RunJobFlowOutput) resource
 			return fmt.Errorf("EMR cluster not found")
 		}

+		*v = *describe.Cluster
+
 		if describe.Cluster != nil &&
 			*describe.Cluster.Status.State != "WAITING" {
 			return fmt.Errorf("EMR cluster is not up yet")
@@ -194,6 +273,308 @@ func testAccCheckAWSEmrClusterExists(n string, v *emr.RunJobFlowOutput) resource
 	}
 }

+func testAccAWSEmrClusterConfig_bootstrap(r string) string {
+	return fmt.Sprintf(`
+resource "aws_emr_cluster" "test" {
+  count = 1
+  name = "%s"
+  release_label = "emr-5.0.0"
+  applications = ["Hadoop", "Hive"]
+  log_uri = "s3n://terraform/testlog/"
+  master_instance_type = "m4.large"
+  core_instance_type = "m1.small"
+  core_instance_count = 1
+  service_role = "${aws_iam_role.iam_emr_default_role.arn}"
+
+  depends_on = ["aws_main_route_table_association.a"]
+
+  ec2_attributes {
+    subnet_id = "${aws_subnet.main.id}"
+
+    emr_managed_master_security_group = "${aws_security_group.allow_all.id}"
+    emr_managed_slave_security_group = "${aws_security_group.allow_all.id}"
+    instance_profile = "${aws_iam_instance_profile.emr_profile.arn}"
+  }
+
+  bootstrap_action {
+    path = "s3://elasticmapreduce/bootstrap-actions/run-if"
+    name = "runif"
+    args =
["instance.isMaster=true", "echo running on master node"] + } + + bootstrap_action = [ + { + path = "s3://${aws_s3_bucket.tester.bucket}/testscript.sh" + name = "test" + + args = ["1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + ] + }, + ] +} + +resource "aws_iam_instance_profile" "emr_profile" { + name = "%s_profile" + role = "${aws_iam_role.iam_emr_profile_role.name}" +} + +resource "aws_iam_role" "iam_emr_default_role" { + name = "%s_default_role" + + assume_role_policy = < Date: Wed, 12 Apr 2017 14:29:03 -0500 Subject: [PATCH 091/342] Update CHANGELOG.md --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f4a3268b7..19cd39a69 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ ## 0.9.3 (unreleased) +BACKWARDS INCOMPATIBILITIES / NOTES: + * provider/aws: Fix a critical bug in `aws_emr_cluster` in order to preserve the ordering + of any arguments in `bootstrap_action`. Terraform will now enforce the ordering + from the configuration. As a result, `aws_emr_cluster` resources may need to be + recreated, as there is no API to update them in-place + FEATURES: * **New Resource:** `aws_api_gateway_method_settings` [GH-13542] From 47dff98e4b85833dd0b27398cb80bc8911a3e9d9 Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 12 Apr 2017 14:29:19 -0500 Subject: [PATCH 092/342] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 19cd39a69..f27dbf180 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ BACKWARDS INCOMPATIBILITIES / NOTES: * provider/aws: Fix a critical bug in `aws_emr_cluster` in order to preserve the ordering of any arguments in `bootstrap_action`. Terraform will now enforce the ordering from the configuration. As a result, `aws_emr_cluster` resources may need to be - recreated, as there is no API to update them in-place + recreated, as there is no API to update them in-place [GH-13580] FEATURES: From d3eeaa0b4e29f3a8b4b5c8497551d60d4e9068fe Mon Sep 17 00:00:00 2001 From: clint shryock Date: Wed, 12 Apr 2017 14:30:37 -0500 Subject: [PATCH 093/342] fix spelling --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f27dbf180..3d7099885 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -111,7 +111,7 @@ BUG FIXES: ## 0.9.2 (March 28, 2017) -BACKWARDS IMCOMPATIBILITIES / NOTES: +BACKWARDS INCOMPATIBILITIES / NOTES: * provider/openstack: Port Fixed IPs are able to be read again using the original numerical notation. However, Fixed IP configurations which are obtaining addresses via DHCP must now use the `all_fixed_ips` attribute to reference the returned IP address. * Environment names must be safe to use as a URL path segment without escaping, and is enforced by the CLI. @@ -192,7 +192,7 @@ BUG FIXES: ## 0.9.1 (March 17, 2017) -BACKWARDS IMCOMPATIBILITIES / NOTES: +BACKWARDS INCOMPATIBILITIES / NOTES: * provider/pagerduty: the deprecated `name_regex` field has been removed from vendor data source ([#12396](https://github.com/hashicorp/terraform/issues/12396)) From dea8b267a94d2131046967ba4c09cd7b7e0b6509 Mon Sep 17 00:00:00 2001 From: Paddy Date: Wed, 12 Apr 2017 12:38:45 -0700 Subject: [PATCH 094/342] provider/google: bump container cluster version in tests. The version we were using has been deprecated and is no longer available, making the withVersion test no longer pass. I've bumped it to the latest available version. 
--- builtin/providers/google/resource_container_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/google/resource_container_cluster_test.go b/builtin/providers/google/resource_container_cluster_test.go index f0723dcb1..6c4acb5f3 100644 --- a/builtin/providers/google/resource_container_cluster_test.go +++ b/builtin/providers/google/resource_container_cluster_test.go @@ -345,7 +345,7 @@ var testAccContainerCluster_withVersion = fmt.Sprintf(` resource "google_container_cluster" "with_version" { name = "cluster-test-%s" zone = "us-central1-a" - node_version = "1.5.2" + node_version = "1.6.0" initial_node_count = 1 master_auth { From 11a20ddb53df8328b6859b30fe226248c6678869 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 12 Apr 2017 12:57:53 -0700 Subject: [PATCH 095/342] provider/google: Add node_pool field in resource_container_cluster. (#13402) --- .../google/resource_container_cluster.go | 88 +++++++++++++- .../google/resource_container_cluster_test.go | 111 ++++++++++++++++++ .../google/r/container_cluster.html.markdown | 12 ++ 3 files changed, 205 insertions(+), 6 deletions(-) diff --git a/builtin/providers/google/resource_container_cluster.go b/builtin/providers/google/resource_container_cluster.go index 084456f2a..8b3233111 100644 --- a/builtin/providers/google/resource_container_cluster.go +++ b/builtin/providers/google/resource_container_cluster.go @@ -6,6 +6,7 @@ import ( "net" "regexp" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/container/v1" "google.golang.org/api/googleapi" @@ -23,12 +24,6 @@ func resourceContainerCluster() *schema.Resource { Delete: resourceContainerClusterDelete, Schema: map[string]*schema.Schema{ - "initial_node_count": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "master_auth": &schema.Schema{ Type: schema.TypeList, Required: true, @@ -96,6 +91,12 @@ func resourceContainerCluster() *schema.Resource { ForceNew: true, }, + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "additional_zones": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -292,6 +293,36 @@ func resourceContainerCluster() *schema.Resource { Computed: true, }, + "node_pool": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"node_pool.name_prefix"}, + ForceNew: true, + }, + + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -439,6 +470,33 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er } } + nodePoolsCount := d.Get("node_pool.#").(int) + if nodePoolsCount > 0 { + nodePools := make([]*container.NodePool, 0, nodePoolsCount) + for i := 0; i < nodePoolsCount; i++ { + prefix := fmt.Sprintf("node_pool.%d", i) + + nodeCount := d.Get(prefix + ".initial_node_count").(int) + + var name string + if v, ok := d.GetOk(prefix + ".name"); ok { + name = v.(string) + } else if v, ok := d.GetOk(prefix + ".name_prefix"); ok { + name = 
resource.PrefixedUniqueId(v.(string)) + } else { + name = resource.UniqueId() + } + + nodePool := &container.NodePool{ + Name: name, + InitialNodeCount: int64(nodeCount), + } + + nodePools = append(nodePools, nodePool) + } + cluster.NodePools = nodePools + } + req := &container.CreateClusterRequest{ Cluster: cluster, } @@ -523,6 +581,7 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro d.Set("network", d.Get("network").(string)) d.Set("subnetwork", cluster.Subnetwork) d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig)) + d.Set("node_pool", flattenClusterNodePools(d, cluster.NodePools)) if igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil { return err @@ -641,3 +700,20 @@ func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} return config } + +func flattenClusterNodePools(d *schema.ResourceData, c []*container.NodePool) []map[string]interface{} { + count := len(c) + + nodePools := make([]map[string]interface{}, 0, count) + + for i, np := range c { + nodePool := map[string]interface{}{ + "name": np.Name, + "name_prefix": d.Get(fmt.Sprintf("node_pool.%d.name_prefix", i)), + "initial_node_count": np.InitialNodeCount, + } + nodePools = append(nodePools, nodePool) + } + + return nodePools +} diff --git a/builtin/providers/google/resource_container_cluster_test.go b/builtin/providers/google/resource_container_cluster_test.go index f0723dcb1..03343cb0c 100644 --- a/builtin/providers/google/resource_container_cluster_test.go +++ b/builtin/providers/google/resource_container_cluster_test.go @@ -132,6 +132,57 @@ func TestAccContainerCluster_backend(t *testing.T) { }) } +func TestAccContainerCluster_withNodePoolBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodePoolBasic, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_node_pool"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodePoolNamePrefix, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_node_pool_name_prefix"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolMultiple(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodePoolMultiple, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_node_pool_multiple"), + ), + }, + }, + }) +} + func testAccCheckContainerClusterDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -222,6 +273,13 @@ func testAccCheckContainerCluster(n string) resource.TestCheckFunc { } } + for i, np := range cluster.NodePools { + prefix := fmt.Sprintf("node_pool.%d.", i) + clusterTests = append(clusterTests, + clusterTestField{prefix + "name", np.Name}, + 
clusterTestField{prefix + "initial_node_count", strconv.FormatInt(np.InitialNodeCount, 10)}) + } + for _, attrs := range clusterTests { if c := checkMatch(attributes, attrs.tf_attr, attrs.gcp_attr); c != "" { return fmt.Errorf(c) @@ -478,3 +536,56 @@ resource "google_container_cluster" "primary" { } } `, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) + +var testAccContainerCluster_withNodePoolBasic = fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool" { + name = "tf-cluster-nodepool-test-%s" + zone = "us-central1-a" + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_pool { + name = "tf-cluster-nodepool-test-%s" + initial_node_count = 2 + } +}`, acctest.RandString(10), acctest.RandString(10)) + +var testAccContainerCluster_withNodePoolNamePrefix = fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_name_prefix" { + name = "tf-cluster-nodepool-test-%s" + zone = "us-central1-a" + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_pool { + name_prefix = "tf-np-test" + initial_node_count = 2 + } +}`, acctest.RandString(10)) + +var testAccContainerCluster_withNodePoolMultiple = fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_multiple" { + name = "tf-cluster-nodepool-test-%s" + zone = "us-central1-a" + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_pool { + name = "tf-cluster-nodepool-test-%s" + initial_node_count = 2 + } + + node_pool { + name = "tf-cluster-nodepool-test-%s" + initial_node_count = 3 + } +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/website/source/docs/providers/google/r/container_cluster.html.markdown b/website/source/docs/providers/google/r/container_cluster.html.markdown index 025603eb1..7679f1fd2 100644 --- a/website/source/docs/providers/google/r/container_cluster.html.markdown +++ b/website/source/docs/providers/google/r/container_cluster.html.markdown @@ -85,6 +85,8 @@ resource "google_container_cluster" "primary" { * `node_config` - (Optional) The machine type and image to use for all nodes in this cluster +* `node_pool` - (Optional) List of node pools associated with this cluster. + * `node_version` - (Optional) The Kubernetes version on the nodes. Also affects the initial master version on cluster creation. Updates affect nodes only. Defaults to the default version set by GKE which is not necessarily the latest @@ -156,6 +158,16 @@ addons_config { } ``` +**Node Pool** supports the following arguments: + +* `initial_node_count` - (Required) The initial node count for the pool. + +* `name` - (Optional) The name of the node pool. If left blank, Terraform will + auto-generate a unique name. + +* `name_prefix` - (Optional) Creates a unique name for the node pool beginning + with the specified prefix. Conflicts with `name`. 
+ ## Attributes Reference In addition to the arguments listed above, the following computed attributes are From f5ed62e5b85826934b2867cc8a4c3fd69d2424bb Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 12 Apr 2017 13:07:05 -0700 Subject: [PATCH 096/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d7099885..0d9a7ae0d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,7 @@ IMPROVEMENTS: * provider/fastly: Add support for Sumologic logging [GH-12541] * provider/github: Handle the case when issue labels already exist [GH-13182] * provider/google: Mark `google_container_cluster`'s `client_key` & `password` inside `master_auth` as sensitive [GH-13148] + * provider/google: Add node_pool field in resource_container_cluster [GH-13402] * provider/kubernetes: Allow defining custom config context [GH-12958] * provider/openstack: Add support for 'value_specs' options to `openstack_compute_servergroup_v2` [GH-13380] * provider/statuscake: Add support for StatusCake TriggerRate field [GH-13340] From 1601af7a076fd03fe940455db7fd01dfa747c795 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 12 Apr 2017 20:21:42 +0000 Subject: [PATCH 097/342] v0.9.3 --- CHANGELOG.md | 2 +- terraform/version.go | 2 +- website/config.rb | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d9a7ae0d..4580c6120 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.9.3 (unreleased) +## 0.9.3 (April 12, 2017) BACKWARDS INCOMPATIBILITIES / NOTES: * provider/aws: Fix a critical bug in `aws_emr_cluster` in order to preserve the ordering diff --git a/terraform/version.go b/terraform/version.go index ada5dcc38..c9ce35b05 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -12,7 +12,7 @@ const Version = "0.9.3" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "" // SemVersion is an instance of version.Version. This has the secondary // benefit of verifying during tests and init time that our version is a diff --git a/website/config.rb b/website/config.rb index b58df71f0..ed989b37d 100644 --- a/website/config.rb +++ b/website/config.rb @@ -2,7 +2,7 @@ set :base_url, "https://www.terraform.io/" activate :hashicorp do |h| h.name = "terraform" - h.version = "0.9.2" + h.version = "0.9.3" h.github_slug = "hashicorp/terraform" end From 381adca0af66650da71729a206c1f882e38cc9c4 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 12 Apr 2017 20:39:57 +0000 Subject: [PATCH 098/342] release: clean up after v0.9.3 --- CHANGELOG.md | 4 ++++ terraform/version.go | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4580c6120..1a2182449 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +## 0.9.4 (Unreleased) + + + ## 0.9.3 (April 12, 2017) BACKWARDS INCOMPATIBILITIES / NOTES: diff --git a/terraform/version.go b/terraform/version.go index c9ce35b05..5dbc57fce 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -7,12 +7,12 @@ import ( ) // The main version number that is being run at the moment. -const Version = "0.9.3" +const Version = "0.9.4" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. 
Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" // SemVersion is an instance of version.Version. This has the secondary // benefit of verifying during tests and init time that our version is a From 051582d32ad8888d2d095ce9069a9aa9d70b88a7 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Wed, 12 Apr 2017 23:25:15 +0200 Subject: [PATCH 099/342] Add the close provider and provisioner transformers (#13102) --- terraform/graph_builder_apply.go | 4 ++++ terraform/graph_builder_apply_test.go | 28 +++++++++++++++++++++++++++ terraform/graph_builder_import.go | 3 +++ terraform/graph_builder_plan.go | 4 ++++ terraform/graph_builder_plan_test.go | 11 +++++++++++ terraform/graph_builder_refresh.go | 3 +++ 6 files changed, 53 insertions(+) diff --git a/terraform/graph_builder_apply.go b/terraform/graph_builder_apply.go index 61242586a..38a90f277 100644 --- a/terraform/graph_builder_apply.go +++ b/terraform/graph_builder_apply.go @@ -123,6 +123,10 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { // Target &TargetsTransformer{Targets: b.Targets}, + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, + // Single root &RootTransformer{}, } diff --git a/terraform/graph_builder_apply_test.go b/terraform/graph_builder_apply_test.go index b62833103..a1c02dfa8 100644 --- a/terraform/graph_builder_apply_test.go +++ b/terraform/graph_builder_apply_test.go @@ -510,6 +510,18 @@ module.child.provider.aws provider.aws module.child.provisioner.exec provider.aws +provider.aws (close) + aws_instance.create + aws_instance.other + module.child.aws_instance.create + module.child.aws_instance.other + provider.aws +provisioner.exec (close) + module.child.aws_instance.create +root + meta.count-boundary (count boundary fixup) + provider.aws (close) + provisioner.exec (close) ` const testApplyGraphBuilderDoubleCBDStr = ` @@ -533,6 +545,15 @@ meta.count-boundary (count boundary fixup) aws_instance.B (destroy) provider.aws provider.aws +provider.aws (close) + aws_instance.A + aws_instance.A (destroy) + aws_instance.B + aws_instance.B (destroy) + provider.aws +root + meta.count-boundary (count boundary fixup) + provider.aws (close) ` const testApplyGraphBuilderDestroyCountStr = ` @@ -546,4 +567,11 @@ meta.count-boundary (count boundary fixup) aws_instance.B provider.aws provider.aws +provider.aws (close) + aws_instance.A[1] (destroy) + aws_instance.B + provider.aws +root + meta.count-boundary (count boundary fixup) + provider.aws (close) ` diff --git a/terraform/graph_builder_import.go b/terraform/graph_builder_import.go index 7fa76ded7..7070c59e4 100644 --- a/terraform/graph_builder_import.go +++ b/terraform/graph_builder_import.go @@ -62,6 +62,9 @@ func (b *ImportGraphBuilder) Steps() []GraphTransformer { // This validates that the providers only depend on variables &ImportProviderValidateTransformer{}, + // Close opened plugin connections + &CloseProviderTransformer{}, + // Single root &RootTransformer{}, diff --git a/terraform/graph_builder_plan.go b/terraform/graph_builder_plan.go index 275cb32f3..02d869700 100644 --- a/terraform/graph_builder_plan.go +++ b/terraform/graph_builder_plan.go @@ -116,6 +116,10 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { // Target &TargetsTransformer{Targets: b.Targets}, + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, + // Single root &RootTransformer{}, } diff 
--git a/terraform/graph_builder_plan_test.go b/terraform/graph_builder_plan_test.go index 02366fe4f..23526a9ac 100644 --- a/terraform/graph_builder_plan_test.go +++ b/terraform/graph_builder_plan_test.go @@ -65,6 +65,17 @@ openstack_floating_ip.random provider.openstack provider.aws openstack_floating_ip.random +provider.aws (close) + aws_instance.web + aws_load_balancer.weblb + aws_security_group.firewall + provider.aws provider.openstack +provider.openstack (close) + openstack_floating_ip.random + provider.openstack +root + provider.aws (close) + provider.openstack (close) var.foo ` diff --git a/terraform/graph_builder_refresh.go b/terraform/graph_builder_refresh.go index 8fed21d61..88ae3380c 100644 --- a/terraform/graph_builder_refresh.go +++ b/terraform/graph_builder_refresh.go @@ -115,6 +115,9 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer { // Target &TargetsTransformer{Targets: b.Targets}, + // Close opened plugin connections + &CloseProviderTransformer{}, + // Single root &RootTransformer{}, } From f1fb9e6ca59a1bd72e3bc0b07dfa2b8647c35a31 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Wed, 12 Apr 2017 23:28:28 +0200 Subject: [PATCH 100/342] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a2182449..0a1c7b1cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,8 @@ ## 0.9.4 (Unreleased) +BUG FIXES: + * core: Add the close provider/provisioner transformers back [GH-13102] ## 0.9.3 (April 12, 2017) From e80a960dc5085a14a0085c3517bb90e3f787a6ab Mon Sep 17 00:00:00 2001 From: Mary Elizabeth Cutrali Date: Wed, 12 Apr 2017 16:39:22 -0500 Subject: [PATCH 101/342] add bitbucket & gitlab to docs sidebar --- website/source/layouts/enterprise.erb | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/website/source/layouts/enterprise.erb b/website/source/layouts/enterprise.erb index 16df41156..fb764a10f 100644 --- a/website/source/layouts/enterprise.erb +++ b/website/source/layouts/enterprise.erb @@ -113,6 +113,12 @@ > GitHub + > + GitLab + + > + BitBucket + > From a891c3cb82834d48d35d0b1deacf7ff382d71f30 Mon Sep 17 00:00:00 2001 From: Paddy Date: Wed, 12 Apr 2017 15:47:49 -0700 Subject: [PATCH 102/342] Update CHANGELOG.md --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a1c7b1cb..87aa7326c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,9 @@ BUG FIXES: * core: Add the close provider/provisioner transformers back [GH-13102] +FEATURES: + * google: `google_compute_address` and `google_compute_global_address` are now importable [GH-13270] + ## 0.9.3 (April 12, 2017) BACKWARDS INCOMPATIBILITIES / NOTES: From 66273ad9b24af04e1b52ab07e8bc6e9e9be6955e Mon Sep 17 00:00:00 2001 From: Paddy Date: Wed, 12 Apr 2017 15:48:12 -0700 Subject: [PATCH 103/342] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 87aa7326c..eee3982b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ BUG FIXES: * core: Add the close provider/provisioner transformers back [GH-13102] -FEATURES: +IMPROVEMENTS: * google: `google_compute_address` and `google_compute_global_address` are now importable [GH-13270] ## 0.9.3 (April 12, 2017) From 444cf356492c91c4cabde1ec6777a7a2f57f2b4c Mon Sep 17 00:00:00 2001 From: John McGowan Date: Wed, 12 Apr 2017 17:11:26 -0600 Subject: [PATCH 104/342] Fixing some documentation issues with the Azurerm Traffic Manager Endpoint page --- 
.../azurerm/r/traffic_manager_endpoint.html.markdown | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown b/website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown
index 8e6e93d8a..8ddf80d24 100644
--- a/website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown
+++ b/website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown
@@ -50,14 +50,14 @@ resource "azurerm_traffic_manager_endpoint" "test" {

 The following arguments are supported:

-* `name` - (Required) The name of the virtual network. Changing this forces a
+* `name` - (Required) The name of the Traffic Manager endpoint. Changing this forces a
    new resource to be created.

 * `resource_group_name` - (Required) The name of the resource group in which to
-   create the virtual network.
+   create the Traffic Manager endpoint.

 * `profile_name` - (Required) The name of the Traffic Manager Profile to attach
-   create the virtual network.
+   the Traffic Manager endpoint to.

 * `endpoint_status` - (Optional) The status of the Endpoint, can be set to
    either `Enabled` or `Disabled`. Defaults to `Enabled`.
@@ -73,7 +73,7 @@ The following arguments are supported:

 * `target_resource_id` - (Optional) The resource id of an Azure resource to
    target. This argument must be provided for an endpoint of type
-   `azureEndpoints`.
+   `azureEndpoints` or `nestedEndpoints`.

 * `weight` - (Optional) Specifies how much traffic should be distributed to
    this endpoint, this must be specified for Profiles using the `Weighted` traffic

From fb0533ee540b37a168cdd9228a0988128c3830c5 Mon Sep 17 00:00:00 2001
From: Vishnu Bharathi
Date: Thu, 13 Apr 2017 11:54:10 +0530
Subject: [PATCH 105/342] Fixes kubernetes namespace name to have valid format
 (#13615)

---
 .../docs/providers/kubernetes/r/namespace.html.markdown | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/source/docs/providers/kubernetes/r/namespace.html.markdown b/website/source/docs/providers/kubernetes/r/namespace.html.markdown
index 02375f5b5..f236820c4 100644
--- a/website/source/docs/providers/kubernetes/r/namespace.html.markdown
+++ b/website/source/docs/providers/kubernetes/r/namespace.html.markdown
@@ -24,7 +24,7 @@ resource "kubernetes_namespace" "example" {
       mylabel = "label-value"
     }

-    name = "TerraformExampleNamespace"
+    name = "terraform-example-namespace"
   }
 }

@@ -59,5 +59,5 @@ The following arguments are supported:

 Namespaces can be imported using their name, e.g.
``` -$ terraform import kubernetes_namespace.n TerraformExampleNamespace +$ terraform import kubernetes_namespace.n terraform-example-namespace ``` From 336a37bea3141eb944f60acad4c970319a234b0c Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 13 Apr 2017 10:32:43 +0100 Subject: [PATCH 106/342] Quoting the title for the iam_role data source Fixes #13613 --- website/source/docs/providers/aws/d/iam_role.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/d/iam_role.html.markdown b/website/source/docs/providers/aws/d/iam_role.html.markdown index e93b16f12..85ce81de2 100644 --- a/website/source/docs/providers/aws/d/iam_role.html.markdown +++ b/website/source/docs/providers/aws/d/iam_role.html.markdown @@ -1,6 +1,6 @@ --- layout: "aws" -page_title: AWS: aws_iam_role +page_title: "AWS: aws_iam_role" sidebar_current: docs-aws-datasource-iam-role description: |- Get information on a Amazon IAM role From 5d64819ffa56a35ae8e69cb9dce51ad8ee205d8c Mon Sep 17 00:00:00 2001 From: Sean Chittenden Date: Thu, 13 Apr 2017 04:30:46 -0700 Subject: [PATCH 107/342] Update Triton API libraries to fix regression with CNS and machine tags. (#13612) * Update Triton API libraries (`joyent/triton-go`) to fix regression with CNS and machine tags. * Update checksum to match the latest upstream sha --- vendor/github.com/joyent/triton-go/client.go | 35 ++-- .../github.com/joyent/triton-go/machines.go | 163 +++++++++++++++++- vendor/vendor.json | 6 +- 3 files changed, 180 insertions(+), 24 deletions(-) diff --git a/vendor/github.com/joyent/triton-go/client.go b/vendor/github.com/joyent/triton-go/client.go index 2b840bba5..e34c6b1fe 100644 --- a/vendor/github.com/joyent/triton-go/client.go +++ b/vendor/github.com/joyent/triton-go/client.go @@ -10,7 +10,6 @@ import ( "net/http" "net/url" "os" - "strings" "time" "github.com/hashicorp/errwrap" @@ -22,7 +21,7 @@ import ( type Client struct { client *retryablehttp.Client authorizer []authentication.Signer - endpoint string + apiURL url.URL accountName string } @@ -36,6 +35,15 @@ func NewClient(endpoint string, accountName string, signers ...authentication.Si defaultRetryWaitMax := 5 * time.Minute defaultRetryMax := 32 + apiURL, err := url.Parse(endpoint) + if err != nil { + return nil, errwrap.Wrapf("invalid endpoint: {{err}}", err) + } + + if accountName == "" { + return nil, fmt.Errorf("account name can not be empty") + } + httpClient := &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, @@ -62,7 +70,7 @@ func NewClient(endpoint string, accountName string, signers ...authentication.Si return &Client{ client: retryableClient, authorizer: signers, - endpoint: strings.TrimSuffix(endpoint, "/"), + apiURL: *apiURL, accountName: accountName, }, nil } @@ -71,10 +79,6 @@ func doNotFollowRedirects(*http.Request, []*http.Request) error { return http.ErrUseLastResponse } -func (c *Client) formatURL(path string) string { - return fmt.Sprintf("%s%s", c.endpoint, path) -} - func (c *Client) executeRequestURIParams(method, path string, body interface{}, query *url.Values) (io.ReadCloser, error) { var requestBody io.ReadSeeker if body != nil { @@ -85,7 +89,13 @@ func (c *Client) executeRequestURIParams(method, path string, body interface{}, requestBody = bytes.NewReader(marshaled) } - req, err := retryablehttp.NewRequest(method, c.formatURL(path), requestBody) + endpoint := c.apiURL + endpoint.Path = path + if query != nil { + endpoint.RawQuery = query.Encode() + } + + req, err := 
retryablehttp.NewRequest(method, endpoint.String(), requestBody) if err != nil { return nil, errwrap.Wrapf("Error constructing HTTP request: {{err}}", err) } @@ -106,10 +116,6 @@ func (c *Client) executeRequestURIParams(method, path string, body interface{}, req.Header.Set("Content-Type", "application/json") } - if query != nil { - req.URL.RawQuery = query.Encode() - } - resp, err := c.client.Do(req) if err != nil { return nil, errwrap.Wrapf("Error executing HTTP request: {{err}}", err) @@ -149,7 +155,10 @@ func (c *Client) executeRequestRaw(method, path string, body interface{}) (*http requestBody = bytes.NewReader(marshaled) } - req, err := retryablehttp.NewRequest(method, c.formatURL(path), requestBody) + endpoint := c.apiURL + endpoint.Path = path + + req, err := retryablehttp.NewRequest(method, endpoint.String(), requestBody) if err != nil { return nil, errwrap.Wrapf("Error constructing HTTP request: {{err}}", err) } diff --git a/vendor/github.com/joyent/triton-go/machines.go b/vendor/github.com/joyent/triton-go/machines.go index 8f2de9736..0fae69b4a 100644 --- a/vendor/github.com/joyent/triton-go/machines.go +++ b/vendor/github.com/joyent/triton-go/machines.go @@ -4,10 +4,12 @@ import ( "encoding/json" "fmt" "net/http" + "strings" "time" - "github.com/hashicorp/errwrap" "net/url" + + "github.com/hashicorp/errwrap" ) type MachinesClient struct { @@ -20,6 +22,21 @@ func (c *Client) Machines() *MachinesClient { return &MachinesClient{c} } +const ( + machineCNSTagDisable = "triton.cns.disable" + machineCNSTagReversePTR = "triton.cns.reverse_ptr" + machineCNSTagServices = "triton.cns.services" +) + +// MachineCNS is a container for the CNS-specific attributes. In the API these +// values are embedded within a Machine's Tags attribute, however they are +// exposed to the caller as their native types. +type MachineCNS struct { + Disable *bool + ReversePTR *string + Services []string +} + type Machine struct { ID string `json:"id"` Name string `json:"name"` @@ -41,6 +58,14 @@ type Machine struct { ComputeNode string `json:"compute_node"` Package string `json:"package"` DomainNames []string `json:"dns_names"` + CNS MachineCNS +} + +// _Machine is a private facade over Machine that handles the necessary API +// overrides from vmapi's machine endpoint(s). 
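+//
+// For example, the API returns CNS settings such as "triton.cns.services"
+// inside the Tags map; toNative() (defined below) lifts those reserved keys
+// into the typed CNS field and leaves only plain key/value pairs in Tags.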
+type _Machine struct { + Machine + Tags map[string]interface{} `json:"tags"` } type NIC struct { @@ -57,7 +82,19 @@ type GetMachineInput struct { ID string } +func (gmi *GetMachineInput) Validate() error { + if gmi.ID == "" { + return fmt.Errorf("machine ID can not be empty") + } + + return nil +} + func (client *MachinesClient) GetMachine(input *GetMachineInput) (*Machine, error) { + if err := input.Validate(); err != nil { + return nil, errwrap.Wrapf("unable to get machine: {{err}}", err) + } + path := fmt.Sprintf("/%s/machines/%s", client.accountName, input.ID) response, err := client.executeRequestRaw(http.MethodGet, path, nil) if response != nil { @@ -73,13 +110,51 @@ func (client *MachinesClient) GetMachine(input *GetMachineInput) (*Machine, erro client.decodeError(response.StatusCode, response.Body)) } - var result *Machine + var result *_Machine decoder := json.NewDecoder(response.Body) if err = decoder.Decode(&result); err != nil { return nil, errwrap.Wrapf("Error decoding GetMachine response: {{err}}", err) } - return result, nil + native, err := result.toNative() + if err != nil { + return nil, errwrap.Wrapf("unable to convert API response for machines to native type: {{err}}", err) + } + + return native, nil +} + +func (client *MachinesClient) GetMachines() ([]*Machine, error) { + path := fmt.Sprintf("/%s/machines", client.accountName) + response, err := client.executeRequestRaw(http.MethodGet, path, nil) + if response != nil { + defer response.Body.Close() + } + if response.StatusCode == http.StatusNotFound { + return nil, &TritonError{ + Code: "ResourceNotFound", + } + } + if err != nil { + return nil, errwrap.Wrapf("Error executing GetMachines request: {{err}}", + client.decodeError(response.StatusCode, response.Body)) + } + + var results []*_Machine + decoder := json.NewDecoder(response.Body) + if err = decoder.Decode(&results); err != nil { + return nil, errwrap.Wrapf("Error decoding GetMachines response: {{err}}", err) + } + + machines := make([]*Machine, 0, len(results)) + for _, machineAPI := range results { + native, err := machineAPI.toNative() + if err != nil { + return nil, errwrap.Wrapf("unable to convert API response for machines to native type: {{err}}", err) + } + machines = append(machines, native) + } + return machines, nil } type CreateMachineInput struct { @@ -93,23 +168,31 @@ type CreateMachineInput struct { Metadata map[string]string Tags map[string]string FirewallEnabled bool + CNS MachineCNS } -func transformCreateMachineInput(input *CreateMachineInput) map[string]interface{} { - result := make(map[string]interface{}, 8+len(input.Metadata)+len(input.Tags)) +func (input *CreateMachineInput) toAPI() map[string]interface{} { + const numExtraParams = 8 + result := make(map[string]interface{}, numExtraParams+len(input.Metadata)+len(input.Tags)) + result["firewall_enabled"] = input.FirewallEnabled + if input.Name != "" { result["name"] = input.Name } + if input.Package != "" { result["package"] = input.Package } + if input.Image != "" { result["image"] = input.Image } + if len(input.Networks) > 0 { result["networks"] = input.Networks } + locality := struct { Strict bool `json:"strict"` Near []string `json:"near,omitempty"` @@ -123,6 +206,11 @@ func transformCreateMachineInput(input *CreateMachineInput) map[string]interface for key, value := range input.Tags { result[fmt.Sprintf("tag.%s", key)] = value } + + // Deliberately clobber any user-specified Tags with the attributes from the + // CNS struct. 
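+	// For example, MachineCNS{Services: []string{"frontend"}} sets the
+	// reserved key "triton.cns.services" to "frontend" in the request body.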
+ input.CNS.toTags(result) + for key, value := range input.Metadata { result[fmt.Sprintf("metadata.%s", key)] = value } @@ -131,7 +219,7 @@ func transformCreateMachineInput(input *CreateMachineInput) map[string]interface } func (client *MachinesClient) CreateMachine(input *CreateMachineInput) (*Machine, error) { - respReader, err := client.executeRequest(http.MethodPost, "/my/machines", transformCreateMachineInput(input)) + respReader, err := client.executeRequest(http.MethodPost, "/my/machines", input.toAPI()) if respReader != nil { defer respReader.Close() } @@ -309,13 +397,14 @@ func (client *MachinesClient) ListMachineTags(input *ListMachineTagsInput) (map[ return nil, errwrap.Wrapf("Error executing ListMachineTags request: {{err}}", err) } - var result map[string]string + var result map[string]interface{} decoder := json.NewDecoder(respReader) if err = decoder.Decode(&result); err != nil { return nil, errwrap.Wrapf("Error decoding ListMachineTags response: {{err}}", err) } - return result, nil + _, tags := machineTagsExtractMeta(result) + return tags, nil } type UpdateMachineMetadataInput struct { @@ -470,3 +559,61 @@ func (client *MachinesClient) RemoveNIC(input *RemoveNICInput) error { return nil } + +var reservedMachineCNSTags = map[string]struct{}{ + machineCNSTagDisable: {}, + machineCNSTagReversePTR: {}, + machineCNSTagServices: {}, +} + +// machineTagsExtractMeta() extracts all of the misc parameters from Tags and +// returns a clean CNS and Tags struct. +func machineTagsExtractMeta(tags map[string]interface{}) (MachineCNS, map[string]string) { + nativeCNS := MachineCNS{} + nativeTags := make(map[string]string, len(tags)) + for k, raw := range tags { + if _, found := reservedMachineCNSTags[k]; found { + switch k { + case machineCNSTagDisable: + b := raw.(bool) + nativeCNS.Disable = &b + case machineCNSTagReversePTR: + s := raw.(string) + nativeCNS.ReversePTR = &s + case machineCNSTagServices: + nativeCNS.Services = strings.Split(raw.(string), ",") + default: + // TODO(seanc@): should assert, logic fail + } + } else { + nativeTags[k] = raw.(string) + } + } + + return nativeCNS, nativeTags +} + +// toNative() exports a given _Machine (API representation) to its native object +// format. 
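+// For example, an API Tags value of {"triton.cns.disable": true, "role": "db"}
+// yields CNS.Disable pointing at true and Tags equal to {"role": "db"}.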
+func (api *_Machine) toNative() (*Machine, error) {
+	m := Machine(api.Machine)
+	m.CNS, m.Tags = machineTagsExtractMeta(api.Tags)
+	return &m, nil
+}
+
+// toTags() injects its state into a Tags map suitable for submitting an API
+// call to the vmapi machine endpoint
+func (mcns *MachineCNS) toTags(m map[string]interface{}) {
+	if mcns.Disable != nil {
+		m[machineCNSTagDisable] = fmt.Sprintf("%t", *mcns.Disable)
+	}
+
+	if mcns.ReversePTR != nil {
+		m[machineCNSTagReversePTR] = *mcns.ReversePTR
+	}
+
+	if len(mcns.Services) > 0 {
+		m[machineCNSTagServices] = strings.Join(mcns.Services, ",")
+	}
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index ed7976359..b7e96ab25 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -2294,10 +2294,10 @@
       "revisionTime": "2016-06-16T18:50:15Z"
     },
     {
-      "checksumSHA1": "fue8Al8kqw/Q6VFPsNzoky7NIgo=",
+      "checksumSHA1": "2HimxaJVVp2QDVQ0570L71Zd5s4=",
       "path": "github.com/joyent/triton-go",
-      "revision": "66b31a94af28a65e902423879a2820ea34b773fb",
-      "revisionTime": "2017-03-31T18:12:29Z"
+      "revision": "5db9e2b6a4c1f7ffd2a7e7aa625f42dba956608c",
+      "revisionTime": "2017-04-12T23:23:58Z"
     },
     {
       "checksumSHA1": "QzUqkCSn/ZHyIK346xb9V6EBw9U=",

From 6efd0640ece1bfa689508ce8ef7666b018b9fc68 Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Thu, 13 Apr 2017 05:37:15 -0600
Subject: [PATCH 108/342] Openstack port update fixes (#13604)

* provider/openstack: Handle 409 Errors Upon Security Group Deletion

If a security group is currently in use, it will throw a 409 error. This
commit catches the 409, allowing other resources to finish deleting.

* Update openstack_networking_port_v2 resource to pass empty arrays for AllowedAddressPairs and SecurityGroups if not specified.

Fixes #13531

* provider/openstack: Port Update comment

---
 .../resource_openstack_networking_port_v2.go  |  21 +--
 ...ource_openstack_networking_port_v2_test.go | 151 ++++++++++++++++++
 ...source_openstack_networking_secgroup_v2.go |   5 +
 3 files changed, 164 insertions(+), 13 deletions(-)

diff --git a/builtin/providers/openstack/resource_openstack_networking_port_v2.go b/builtin/providers/openstack/resource_openstack_networking_port_v2.go
index 508ebc813..4be432935 100644
--- a/builtin/providers/openstack/resource_openstack_networking_port_v2.go
+++ b/builtin/providers/openstack/resource_openstack_networking_port_v2.go
@@ -236,7 +236,14 @@ func resourceNetworkingPortV2Update(d *schema.ResourceData, meta interface{}) er
 		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
 	}

-	var updateOpts ports.UpdateOpts
+	// security_group_ids and allowed_address_pairs can be sent as empty arrays
+	// to denote the removal of each. But their default zero-value is translated
+	// to "null", which has been reported to cause problems in vendor-modified
+	// OpenStack clouds. Therefore, we must set them in each request update.
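+	// Sending an empty SecurityGroups or AllowedAddressPairs slice therefore
+	// explicitly clears those attributes on the port rather than leaving
+	// them untouched.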
+ updateOpts := ports.UpdateOpts{ + AllowedAddressPairs: resourceAllowedAddressPairsV2(d), + SecurityGroups: resourcePortSecurityGroupsV2(d), + } if d.HasChange("name") { updateOpts.Name = d.Get("name").(string) @@ -250,10 +257,6 @@ func resourceNetworkingPortV2Update(d *schema.ResourceData, meta interface{}) er updateOpts.DeviceOwner = d.Get("device_owner").(string) } - if d.HasChange("security_group_ids") { - updateOpts.SecurityGroups = resourcePortSecurityGroupsV2(d) - } - if d.HasChange("device_id") { updateOpts.DeviceID = d.Get("device_id").(string) } @@ -262,10 +265,6 @@ func resourceNetworkingPortV2Update(d *schema.ResourceData, meta interface{}) er updateOpts.FixedIPs = resourcePortFixedIpsV2(d) } - if d.HasChange("allowed_address_pairs") { - updateOpts.AllowedAddressPairs = resourceAllowedAddressPairsV2(d) - } - log.Printf("[DEBUG] Updating Port %s with options: %+v", d.Id(), updateOpts) _, err = ports.Update(networkingClient, d.Id(), updateOpts).Extract() @@ -332,10 +331,6 @@ func resourceAllowedAddressPairsV2(d *schema.ResourceData) []ports.AddressPair { // ports.AddressPair rawPairs := d.Get("allowed_address_pairs").(*schema.Set).List() - if len(rawPairs) == 0 { - return nil - } - pairs := make([]ports.AddressPair, len(rawPairs)) for i, raw := range rawPairs { rawMap := raw.(map[string]interface{}) diff --git a/builtin/providers/openstack/resource_openstack_networking_port_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_port_v2_test.go index 28e08bebd..a9d7281af 100644 --- a/builtin/providers/openstack/resource_openstack_networking_port_v2_test.go +++ b/builtin/providers/openstack/resource_openstack_networking_port_v2_test.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" @@ -168,6 +169,54 @@ func TestAccNetworkingV2Port_fixedIPs(t *testing.T) { }) } +func TestAccNetworkingV2Port_updateSecurityGroups(t *testing.T) { + var network networks.Network + var port ports.Port + var security_group groups.SecGroup + var subnet subnets.Subnet + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2PortDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2Port_updateSecurityGroups_1, + Check: resource.ComposeTestCheckFunc( + testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), + testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), + testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), + testAccCheckNetworkingV2SecGroupExists( + "openstack_networking_secgroup_v2.secgroup_1", &security_group), + testAccCheckNetworkingV2PortCountSecurityGroups(&port, 1), + ), + }, + resource.TestStep{ + Config: testAccNetworkingV2Port_updateSecurityGroups_2, + Check: resource.ComposeTestCheckFunc( + testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), + testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), + testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), + 
testAccCheckNetworkingV2SecGroupExists( + "openstack_networking_secgroup_v2.secgroup_1", &security_group), + testAccCheckNetworkingV2PortCountSecurityGroups(&port, 1), + ), + }, + resource.TestStep{ + Config: testAccNetworkingV2Port_updateSecurityGroups_3, + Check: resource.ComposeTestCheckFunc( + testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), + testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), + testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), + testAccCheckNetworkingV2SecGroupExists( + "openstack_networking_secgroup_v2.secgroup_1", &security_group), + testAccCheckNetworkingV2PortCountSecurityGroups(&port, 0), + ), + }, + }, + }) +} + func testAccCheckNetworkingV2PortDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) networkingClient, err := config.networkingV2Client(OS_REGION_NAME) @@ -231,6 +280,16 @@ func testAccCheckNetworkingV2PortCountFixedIPs(port *ports.Port, expected int) r } } +func testAccCheckNetworkingV2PortCountSecurityGroups(port *ports.Port, expected int) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len(port.SecurityGroups) != expected { + return fmt.Errorf("Expected %d Security Groups, got %d", expected, len(port.SecurityGroups)) + } + + return nil + } +} + const testAccNetworkingV2Port_basic = ` resource "openstack_networking_network_v2" "network_1" { name = "network_1" @@ -472,3 +531,95 @@ resource "openstack_networking_port_v2" "port_1" { } } ` + +const testAccNetworkingV2Port_updateSecurityGroups_1 = ` +resource "openstack_networking_network_v2" "network_1" { + name = "network_1" + admin_state_up = "true" +} + +resource "openstack_networking_subnet_v2" "subnet_1" { + name = "subnet_1" + cidr = "192.168.199.0/24" + ip_version = 4 + network_id = "${openstack_networking_network_v2.network_1.id}" +} + +resource "openstack_networking_secgroup_v2" "secgroup_1" { + name = "security_group" + description = "terraform security group acceptance test" +} + +resource "openstack_networking_port_v2" "port_1" { + name = "port_1" + admin_state_up = "true" + network_id = "${openstack_networking_network_v2.network_1.id}" + + fixed_ip { + subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" + ip_address = "192.168.199.23" + } +} +` + +const testAccNetworkingV2Port_updateSecurityGroups_2 = ` +resource "openstack_networking_network_v2" "network_1" { + name = "network_1" + admin_state_up = "true" +} + +resource "openstack_networking_subnet_v2" "subnet_1" { + name = "subnet_1" + cidr = "192.168.199.0/24" + ip_version = 4 + network_id = "${openstack_networking_network_v2.network_1.id}" +} + +resource "openstack_networking_secgroup_v2" "secgroup_1" { + name = "security_group" + description = "terraform security group acceptance test" +} + +resource "openstack_networking_port_v2" "port_1" { + name = "port_1" + admin_state_up = "true" + network_id = "${openstack_networking_network_v2.network_1.id}" + security_group_ids = ["${openstack_networking_secgroup_v2.secgroup_1.id}"] + + fixed_ip { + subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" + ip_address = "192.168.199.23" + } +} +` + +const testAccNetworkingV2Port_updateSecurityGroups_3 = ` +resource "openstack_networking_network_v2" "network_1" { + name = "network_1" + admin_state_up = "true" +} + +resource "openstack_networking_subnet_v2" "subnet_1" { + name = "subnet_1" + cidr = "192.168.199.0/24" + ip_version = 4 + network_id = 
"${openstack_networking_network_v2.network_1.id}" +} + +resource "openstack_networking_secgroup_v2" "secgroup_1" { + name = "security_group" + description = "terraform security group acceptance test" +} + +resource "openstack_networking_port_v2" "port_1" { + name = "port_1" + admin_state_up = "true" + network_id = "${openstack_networking_network_v2.network_1.id}" + security_group_ids = [] + + fixed_ip { + subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" + ip_address = "192.168.199.23" + } +} +` diff --git a/builtin/providers/openstack/resource_openstack_networking_secgroup_v2.go b/builtin/providers/openstack/resource_openstack_networking_secgroup_v2.go index f76d24c57..8dad6fad8 100644 --- a/builtin/providers/openstack/resource_openstack_networking_secgroup_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_secgroup_v2.go @@ -167,6 +167,11 @@ func waitForSecGroupDelete(networkingClient *gophercloud.ServiceClient, secGroup log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group %s", secGroupId) return r, "DELETED", nil } + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + return r, "ACTIVE", nil + } + } return r, "ACTIVE", err } From f5e08948a4df2ecfe0561a983969738dd83ba40e Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Thu, 13 Apr 2017 13:38:03 +0200 Subject: [PATCH 109/342] Update CHANGELOG.md --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index eee3982b7..00c0a28d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,10 @@ BUG FIXES: IMPROVEMENTS: * google: `google_compute_address` and `google_compute_global_address` are now importable [GH-13270] + +BUG FIXES: + + * provider/openstack: Fix updating Ports [GH-13604] ## 0.9.3 (April 12, 2017) From 2734a538877e05a7eeaaf3eedb6006e92604cfe0 Mon Sep 17 00:00:00 2001 From: Michael Warkentin Date: Thu, 13 Apr 2017 10:07:48 -0400 Subject: [PATCH 110/342] Grammar fix (#13626) --- website/source/docs/commands/init.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/commands/init.html.markdown b/website/source/docs/commands/init.html.markdown index aed3a97cb..1222b46c7 100644 --- a/website/source/docs/commands/init.html.markdown +++ b/website/source/docs/commands/init.html.markdown @@ -66,7 +66,7 @@ The command-line flags are all optional. The list of available flags are: ## Backend Config The `-backend-config` can take a path or `key=value` pair to specify additional -backend configuration when [initialize a backend](/docs/backends/init.html). +backend configuration when [initializing a backend](/docs/backends/init.html). This is particularly useful for [partial configuration of backends](/docs/backends/config.html). Partial From b0407adaf5e209a5f4630b36df97e11a048e0b0d Mon Sep 17 00:00:00 2001 From: Mark van den Boomen Date: Thu, 13 Apr 2017 17:55:07 +0200 Subject: [PATCH 111/342] Host based routing on ALB possible (#13623) Tried to use the Host based routing feature of the ALB (ELB v2.0) with Terraform 0.9.2. Worked perfectly, so updating the documentation. 
---
 .../aws/r/alb_listener_rule.html.markdown | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/website/source/docs/providers/aws/r/alb_listener_rule.html.markdown b/website/source/docs/providers/aws/r/alb_listener_rule.html.markdown
index 1d8598414..a868bed44 100644
--- a/website/source/docs/providers/aws/r/alb_listener_rule.html.markdown
+++ b/website/source/docs/providers/aws/r/alb_listener_rule.html.markdown
@@ -36,6 +36,22 @@ resource "aws_alb_listener_rule" "static" {
     values = ["/static/*"]
   }
 }
+
+resource "aws_alb_listener_rule" "host_based_routing" {
+  listener_arn = "${aws_alb_listener.front_end.arn}"
+  priority     = 99
+
+  action {
+    type             = "forward"
+    target_group_arn = "${aws_alb_target_group.static.arn}"
+  }
+
+  condition {
+    field  = "host-header"
+    values = ["my-service.*.terraform.io"]
+  }
+}
+
 ```

 ## Argument Reference
@@ -54,7 +70,7 @@ Action Blocks (for `default_action`) support the following:

 Condition Blocks (for `default_condition`) support the following:

-* `field` - (Required) The name of the field. The only valid value is `path-pattern`.
+* `field` - (Required) The name of the field. Must be one of `path-pattern` for path-based routing or `host-header` for host-based routing.

 * `values` - (Required) The path patterns to match. A maximum of 1 can be defined.

 ## Attributes Reference

From 09031a9133ee32f69b300ea6d70053e6e5a0e3b0 Mon Sep 17 00:00:00 2001
From: ebilhoo
Date: Thu, 13 Apr 2017 19:41:14 +0000
Subject: [PATCH 112/342] rdpool doc

---
 .../providers/ultradns/r/rdpool.html.markdown | 46 +++++++++++++++++++
 1 file changed, 46 insertions(+)
 create mode 100644 website/source/docs/providers/ultradns/r/rdpool.html.markdown

diff --git a/website/source/docs/providers/ultradns/r/rdpool.html.markdown b/website/source/docs/providers/ultradns/r/rdpool.html.markdown
new file mode 100644
index 000000000..be7410fc1
--- /dev/null
+++ b/website/source/docs/providers/ultradns/r/rdpool.html.markdown
@@ -0,0 +1,46 @@
+---
+layout: "ultradns"
+page_title: "UltraDNS: ultradns_rdpool"
+sidebar_current: "docs-ultradns-resource-rdpool"
+description: |-
+  Provides an UltraDNS Resource Distribution pool resource.
+---
+
+# ultradns\_rdpool
+
+Provides an UltraDNS Resource Distribution (RD) pool resource, which is
+used to define rules for returning multiple A or AAAA records for a given owner name. Ordering can be FIXED, RANDOM or ROUND_ROBIN.
+
+## Example Usage
+```
+# Create a Resource Distribution pool
+
+resource "ultradns_rdpool" "pool" {
+  zone        = "${var.ultradns_domain}"
+  name        = "terraform-rdpool"
+  ttl         = 600
+  description = "Example RD Pool"
+  order       = "ROUND_ROBIN"
+  rdata       = [ "192.168.0.10", "192.168.0.11" ]
+}
+```
+
+## Argument Reference
+
+See [related part of UltraDNS Docs](https://restapi.ultradns.com/v1/docs#post-rrset) for details about valid values.
+
+The following arguments are supported:
+
+* `zone` - (Required) The domain to add the record to
+* `name` - (Required) The name of the record
+* `order` - (Required) Ordering rule, one of FIXED, RANDOM or ROUND_ROBIN
+* `rdata` - (Required) List of IP addresses in the pool.
+* `description` - (Optional) Description of the Resource Distribution pool. Valid values are strings less than 256 characters.
+* `ttl` - (Optional) The TTL of the pool in seconds. Default: `3600`.
+ +## Attributes Reference + +The following attributes are exported: + +* `id` - The record ID +* `hostname` - The FQDN of the record From baaf6f5de692ebf788750815aed9d2381a3b16a6 Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 13 Apr 2017 15:00:41 -0500 Subject: [PATCH 113/342] provider/aws: Fix some acc tests (more, again) (#13639) * add check destroy to subnet data source tests * provider/aws: Fix TestAccAWSElasticacheCluster_snapshotsWithUpdates by waiting for snapshots * provider/aws: Fix TestAccAWSRDSCluster_takeFinalSnapshot * provider/aws: Fix TestAccAWSKinesisFirehoseDeliveryStream_ElasticsearchConfigUpdates by specifying instance type --- builtin/providers/aws/data_source_aws_subnet_ids_test.go | 5 +++-- builtin/providers/aws/data_source_aws_subnet_test.go | 7 ++++--- builtin/providers/aws/resource_aws_elasticache_cluster.go | 2 +- .../resource_aws_kinesis_firehose_delivery_stream_test.go | 3 +++ builtin/providers/aws/resource_aws_rds_cluster_test.go | 3 +-- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/builtin/providers/aws/data_source_aws_subnet_ids_test.go b/builtin/providers/aws/data_source_aws_subnet_ids_test.go index 35de8379d..36f6c4b91 100644 --- a/builtin/providers/aws/data_source_aws_subnet_ids_test.go +++ b/builtin/providers/aws/data_source_aws_subnet_ids_test.go @@ -11,8 +11,9 @@ import ( func TestAccDataSourceAwsSubnetIDs(t *testing.T) { rInt := acctest.RandIntRange(0, 256) resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVpcDestroy, Steps: []resource.TestStep{ { Config: testAccDataSourceAwsSubnetIDsConfig(rInt), diff --git a/builtin/providers/aws/data_source_aws_subnet_test.go b/builtin/providers/aws/data_source_aws_subnet_test.go index 73a900751..c6234ac39 100644 --- a/builtin/providers/aws/data_source_aws_subnet_test.go +++ b/builtin/providers/aws/data_source_aws_subnet_test.go @@ -9,12 +9,13 @@ import ( "github.com/hashicorp/terraform/terraform" ) -func TestAccDataSourceAwsSubnet(t *testing.T) { +func TestAccDataSourceAwsSubnet_basic(t *testing.T) { rInt := acctest.RandIntRange(0, 256) resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVpcDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccDataSourceAwsSubnetConfig(rInt), diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 36f07524c..1bd24d361 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -311,7 +311,7 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ // name contained uppercase characters. 
d.SetId(strings.ToLower(*resp.CacheCluster.CacheClusterId)) - pending := []string{"creating", "modifying", "restoring"} + pending := []string{"creating", "modifying", "restoring", "snapshotting"} stateConf := &resource.StateChangeConf{ Pending: pending, Target: []string{"available"}, diff --git a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go index 7cc741901..27f227883 100644 --- a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go +++ b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go @@ -534,6 +534,9 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" { var testAccKinesisFirehoseDeliveryStreamBaseElasticsearchConfig = testAccKinesisFirehoseDeliveryStreamBaseConfig + ` resource "aws_elasticsearch_domain" "test_cluster" { domain_name = "es-test-%d" + cluster_config { + instance_type = "r3.large.elasticsearch" + } access_policies = < Date: Thu, 13 Apr 2017 20:11:43 +0000 Subject: [PATCH 114/342] support for rdpool resource --- builtin/providers/ultradns/common_test.go | 23 ++ .../ultradns/resource_ultradns_rdpool.go | 243 ++++++++++++++++++ .../ultradns/resource_ultradns_rdpool_test.go | 100 +++++++ 3 files changed, 366 insertions(+) create mode 100644 builtin/providers/ultradns/resource_ultradns_rdpool.go create mode 100644 builtin/providers/ultradns/resource_ultradns_rdpool_test.go diff --git a/builtin/providers/ultradns/common_test.go b/builtin/providers/ultradns/common_test.go index 24470e0d3..05823fdcd 100644 --- a/builtin/providers/ultradns/common_test.go +++ b/builtin/providers/ultradns/common_test.go @@ -8,6 +8,29 @@ import ( "github.com/hashicorp/terraform/terraform" ) +func testAccRdpoolCheckDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*udnssdk.Client) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ultradns_rdpool" { + continue + } + + k := udnssdk.RRSetKey{ + Zone: rs.Primary.Attributes["zone"], + Name: rs.Primary.Attributes["name"], + Type: rs.Primary.Attributes["type"], + } + + _, err := client.RRSets.Select(k) + if err == nil { + return fmt.Errorf("Record still exists") + } + } + + return nil +} + func testAccTcpoolCheckDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*udnssdk.Client) diff --git a/builtin/providers/ultradns/resource_ultradns_rdpool.go b/builtin/providers/ultradns/resource_ultradns_rdpool.go new file mode 100644 index 000000000..e67b57219 --- /dev/null +++ b/builtin/providers/ultradns/resource_ultradns_rdpool.go @@ -0,0 +1,243 @@ +package ultradns + +import ( + "fmt" + "log" + "strings" + + "github.com/Ensighten/udnssdk" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceUltradnsRdpool() *schema.Resource { + return &schema.Resource{ + Create: resourceUltradnsRdpoolCreate, + Read: resourceUltradnsRdpoolRead, + Update: resourceUltradnsRdpoolUpdate, + Delete: resourceUltradnsRdpoolDelete, + + Schema: map[string]*schema.Schema{ + // Required + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "order": &schema.Schema{ + Type: schema.TypeString, + Required: true, + // 0-255 char + // FIXED | RANDOM | ROUND_ROBIN + }, + "rdata": &schema.Schema{ + Type: schema.TypeSet, + Set: schema.HashString, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + // Optional + 
"description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + // 0-255 char + }, + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 3600, + }, + // Computed + "hostname": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +// CRUD Operations + +func resourceUltradnsRdpoolCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[INFO] ultradns_rdpool create") + client := meta.(*udnssdk.Client) + + r, err := newRRSetResourceFromRdpool(d) + if err != nil { + return err + } + + log.Printf("[INFO] ultradns_rdpool create: %#v", r) + _, err = client.RRSets.Create(r.RRSetKey(), r.RRSet()) + if err != nil { + return fmt.Errorf("create failed: %#v -> %v", r, err) + } + + d.SetId(r.ID()) + log.Printf("[INFO] ultradns_rdpool.id: %v", d.Id()) + + return resourceUltradnsRdpoolRead(d, meta) +} + +func resourceUltradnsRdpoolRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[INFO] ultradns_rdpool read") + client := meta.(*udnssdk.Client) + + rr, err := newRRSetResourceFromRdpool(d) + if err != nil { + return err + } + + rrsets, err := client.RRSets.Select(rr.RRSetKey()) + if err != nil { + uderr, ok := err.(*udnssdk.ErrorResponseList) + if ok { + for _, resps := range uderr.Responses { + // 70002 means Records Not Found + if resps.ErrorCode == 70002 { + d.SetId("") + return nil + } + return fmt.Errorf("resource not found: %v", err) + } + } + return fmt.Errorf("resource not found: %v", err) + } + + r := rrsets[0] + + zone := d.Get("zone") + // ttl + d.Set("ttl", r.TTL) + // hostname + if r.OwnerName == "" { + d.Set("hostname", zone) + } else { + if strings.HasSuffix(r.OwnerName, ".") { + d.Set("hostname", r.OwnerName) + } else { + d.Set("hostname", fmt.Sprintf("%s.%s", r.OwnerName, zone)) + } + } + + // And now... the Profile! 
+ if r.Profile == nil { + return fmt.Errorf("RRSet.profile missing: invalid RDPool schema in: %#v", r) + } + p, err := r.Profile.RDPoolProfile() + if err != nil { + return fmt.Errorf("RRSet.profile could not be unmarshalled: %v\n", err) + } + + // Set simple values + d.Set("description", p.Description) + d.Set("order", p.Order) + + // TODO: rigorously test this to see if we can remove the error handling + + //TODO + + //err = d.Set("rdata", makeSetFromStrings(r.RData)) + //err = d.Set("rdata", makeSetFromRdataAlone(r.RData)) + if err != nil { + return fmt.Errorf("rdata set failed: %#v", err) + } + return nil +} + +func resourceUltradnsRdpoolUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[INFO] ultradns_rdpool update") + client := meta.(*udnssdk.Client) + + r, err := newRRSetResourceFromRdpool(d) + if err != nil { + return err + } + + log.Printf("[INFO] ultradns_rdpool update: %+v", r) + _, err = client.RRSets.Update(r.RRSetKey(), r.RRSet()) + if err != nil { + return fmt.Errorf("resource update failed: %v", err) + } + + return resourceUltradnsRdpoolRead(d, meta) +} + +func resourceUltradnsRdpoolDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[INFO] ultradns_rdpool delete") + client := meta.(*udnssdk.Client) + + r, err := newRRSetResourceFromRdpool(d) + if err != nil { + return err + } + + log.Printf("[INFO] ultradns_rdpool delete: %+v", r) + _, err = client.RRSets.Delete(r.RRSetKey()) + if err != nil { + return fmt.Errorf("resource delete failed: %v", err) + } + + return nil +} + +// Resource Helpers + +func newRRSetResourceFromRdpool(d *schema.ResourceData) (rRSetResource, error) { + //rDataRaw := d.Get("rdata").(*schema.Set).List() + r := rRSetResource{ + // "The only valid rrtype value for SiteBacker or Traffic Controller pools is A" + // per https://portal.ultradns.com/static/docs/REST-API_User_Guide.pdf + RRType: "A", + Zone: d.Get("zone").(string), + OwnerName: d.Get("name").(string), + TTL: d.Get("ttl").(int), + //RData: unzipRdataHosts(rDataRaw), + } + if attr, ok := d.GetOk("rdata"); ok { + rdata := attr.(*schema.Set).List() + r.RData = make([]string, len(rdata)) + for i, j := range rdata { + r.RData[i] = j.(string) + } + } + + profile := udnssdk.RDPoolProfile{ + Context: udnssdk.RDPoolSchema, + Order: d.Get("order").(string), + Description: d.Get("description").(string), + } + + rp := profile.RawProfile() + r.Profile = rp + + return r, nil +} + +// zip RData into []map[string]interface{} +func zipRDataAlone(rds []string) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(rds)) + for _, rd := range rds { + r := map[string]interface{}{ + // "host": rds[i], + "host": rd, + } + result = append(result, r) + } + return result +} + +// makeSetFromRdatas encodes an array of Rdata into a +// *schema.Set in the appropriate structure for the schema +func makeSetFromRdataAlone(rds []string) *schema.Set { + s := &schema.Set{F: hashRdatas} + rs := zipRDataAlone(rds) + for _, r := range rs { + s.Add(r) + } + return s +} diff --git a/builtin/providers/ultradns/resource_ultradns_rdpool_test.go b/builtin/providers/ultradns/resource_ultradns_rdpool_test.go new file mode 100644 index 000000000..1ddd9c025 --- /dev/null +++ b/builtin/providers/ultradns/resource_ultradns_rdpool_test.go @@ -0,0 +1,100 @@ +package ultradns + +import ( + "fmt" + "testing" + + "github.com/Ensighten/udnssdk" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccUltradnsRdpool(t *testing.T) { + var record udnssdk.RRSet + domain := 
"ultradns.phinze.com" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccRdpoolCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf(testCfgRdpoolMinimal, domain), + Check: resource.ComposeTestCheckFunc( + testAccCheckUltradnsRecordExists("ultradns_rdpool.it", &record), + // Specified + resource.TestCheckResourceAttr("ultradns_rdpool.it", "zone", domain), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "name", "test-rdpool-minimal"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "ttl", "300"), + + // hashRdatas(): 10.6.0.1 -> 2847814707 + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2847814707.host", "10.6.0.1"), + // Defaults + resource.TestCheckResourceAttr("ultradns_rdpool.it", "description", "Minimal RD Pool"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2847814707.priority", "1"), + // Generated + resource.TestCheckResourceAttr("ultradns_rdpool.it", "id", "test-rdpool-minimal.ultradns.phinze.com"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "hostname", "test-rdpool-minimal.ultradns.phinze.com."), + ), + }, + resource.TestStep{ + Config: fmt.Sprintf(testCfgRdpoolMaximal, domain), + Check: resource.ComposeTestCheckFunc( + testAccCheckUltradnsRecordExists("ultradns_rdpool.it", &record), + // Specified + resource.TestCheckResourceAttr("ultradns_rdpool.it", "zone", domain), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "name", "test-rdpool-maximal"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "ttl", "300"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "description", "traffic controller pool with all settings tuned"), + + resource.TestCheckResourceAttr("ultradns_rdpool.it", "act_on_probes", "false"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "max_to_lb", "2"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "run_probes", "false"), + + // hashRdatas(): 10.6.1.1 -> 2826722820 + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2826722820.host", "10.6.1.1"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2826722820.priority", "1"), + + // hashRdatas(): 10.6.1.2 -> 829755326 + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.829755326.host", "10.6.1.2"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.829755326.priority", "2"), + + // Generated + resource.TestCheckResourceAttr("ultradns_rdpool.it", "id", "test-rdpool-maximal.ultradns.phinze.com"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "hostname", "test-rdpool-maximal.ultradns.phinze.com."), + ), + }, + }, + }) +} + +const testCfgRdpoolMinimal = ` +resource "ultradns_rdpool" "it" { + zone = "%s" + name = "test-rdpool-minimal" + ttl = 300 + description = "Minimal RD Pool" + + rdata { + host = "10.6.0.1" + } +} +` + +const testCfgRdpoolMaximal = ` +resource "ultradns_rdpool" "it" { + zone = "%s" + name = "test-rdpool-maximal" + order = "ROUND_ROBIN" + ttl = 300 + description = "traffic controller pool with all settings tuned" + rdata { + host = "10.6.1.1" + priority = 1 + } + + rdata { + host = "10.6.1.2" + priority = 2 + } +} +` From 923587a0c9034cf453d55d88d2dbd7fe753b2631 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Thu, 13 Apr 2017 15:17:39 -0500 Subject: [PATCH 115/342] provider/aws: Randomize and fix Dynamo DB test table names --- .../aws/import_aws_dynamodb_table_test.go | 4 +++- .../aws/resource_aws_dynamodb_table_test.go | 19 
+++++++++++-------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/builtin/providers/aws/import_aws_dynamodb_table_test.go b/builtin/providers/aws/import_aws_dynamodb_table_test.go index dc5e2feab..989fece7e 100644 --- a/builtin/providers/aws/import_aws_dynamodb_table_test.go +++ b/builtin/providers/aws/import_aws_dynamodb_table_test.go @@ -3,19 +3,21 @@ package aws import ( "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" ) func TestAccAWSDynamoDbTable_importBasic(t *testing.T) { resourceName := "aws_dynamodb_table.basic-dynamodb-table" + rName := acctest.RandomWithPrefix("TerraformTestTable-") resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSDynamoDbConfigInitialState(), + Config: testAccAWSDynamoDbConfigInitialState(rName), }, { diff --git a/builtin/providers/aws/resource_aws_dynamodb_table_test.go b/builtin/providers/aws/resource_aws_dynamodb_table_test.go index fba65851c..c2d6f33ff 100644 --- a/builtin/providers/aws/resource_aws_dynamodb_table_test.go +++ b/builtin/providers/aws/resource_aws_dynamodb_table_test.go @@ -16,20 +16,22 @@ import ( func TestAccAWSDynamoDbTable_basic(t *testing.T) { var conf dynamodb.DescribeTableOutput + rName := acctest.RandomWithPrefix("TerraformTestTable-") + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSDynamoDbConfigInitialState(), + Config: testAccAWSDynamoDbConfigInitialState(rName), Check: resource.ComposeTestCheckFunc( testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table", &conf), testAccCheckInitialAWSDynamoDbTableConf("aws_dynamodb_table.basic-dynamodb-table"), ), }, { - Config: testAccAWSDynamoDbConfigAddSecondaryGSI, + Config: testAccAWSDynamoDbConfigAddSecondaryGSI(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDynamoDbTableWasUpdated("aws_dynamodb_table.basic-dynamodb-table"), ), @@ -363,10 +365,10 @@ func dynamoDbAttributesToMap(attributes *[]*dynamodb.AttributeDefinition) map[st return attrmap } -func testAccAWSDynamoDbConfigInitialState() string { +func testAccAWSDynamoDbConfigInitialState(rName string) string { return fmt.Sprintf(` resource "aws_dynamodb_table" "basic-dynamodb-table" { - name = "TerraformTestTable-%d" + name = "%s" read_capacity = 10 write_capacity = 20 hash_key = "TestTableHashKey" @@ -407,12 +409,13 @@ resource "aws_dynamodb_table" "basic-dynamodb-table" { projection_type = "KEYS_ONLY" } } -`, acctest.RandInt()) +`, rName) } -const testAccAWSDynamoDbConfigAddSecondaryGSI = ` +func testAccAWSDynamoDbConfigAddSecondaryGSI(rName string) string { + return fmt.Sprintf(` resource "aws_dynamodb_table" "basic-dynamodb-table" { - name = "TerraformTestTable" + name = "%s" read_capacity = 20 write_capacity = 20 hash_key = "TestTableHashKey" @@ -453,8 +456,8 @@ resource "aws_dynamodb_table" "basic-dynamodb-table" { projection_type = "INCLUDE" non_key_attributes = ["TestNonKeyAttribute"] } +}`, rName) } -` func testAccAWSDynamoDbConfigStreamSpecification() string { return fmt.Sprintf(` From 80b6ce9194d8aef0fda1c9f1e5bcab006900a1ff Mon Sep 17 00:00:00 2001 From: ebilhoo Date: Thu, 13 Apr 2017 20:19:08 +0000 Subject: [PATCH 116/342] add rdpool --- builtin/providers/ultradns/provider.go | 1 + 1 file changed, 
1 insertion(+) diff --git a/builtin/providers/ultradns/provider.go b/builtin/providers/ultradns/provider.go index e10015ab2..70f0dbed7 100644 --- a/builtin/providers/ultradns/provider.go +++ b/builtin/providers/ultradns/provider.go @@ -38,6 +38,7 @@ func Provider() terraform.ResourceProvider { "ultradns_probe_ping": resourceUltradnsProbePing(), "ultradns_record": resourceUltradnsRecord(), "ultradns_tcpool": resourceUltradnsTcpool(), + "ultradns_rdpool": resourceUltradnsRdpool(), }, ConfigureFunc: providerConfigure, From 89edd2859c9d18f4133d418a23db8855aff0f4eb Mon Sep 17 00:00:00 2001 From: clint shryock Date: Thu, 13 Apr 2017 15:24:18 -0500 Subject: [PATCH 117/342] fix go fmt errors, my bad --- builtin/providers/aws/import_aws_dynamodb_table_test.go | 6 +++--- builtin/providers/aws/resource_aws_dynamodb_table_test.go | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/builtin/providers/aws/import_aws_dynamodb_table_test.go b/builtin/providers/aws/import_aws_dynamodb_table_test.go index 989fece7e..00fa2169d 100644 --- a/builtin/providers/aws/import_aws_dynamodb_table_test.go +++ b/builtin/providers/aws/import_aws_dynamodb_table_test.go @@ -3,21 +3,21 @@ package aws import ( "testing" - "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" ) func TestAccAWSDynamoDbTable_importBasic(t *testing.T) { resourceName := "aws_dynamodb_table.basic-dynamodb-table" - rName := acctest.RandomWithPrefix("TerraformTestTable-") + rName := acctest.RandomWithPrefix("TerraformTestTable-") resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSDynamoDbConfigInitialState(rName), + Config: testAccAWSDynamoDbConfigInitialState(rName), }, { diff --git a/builtin/providers/aws/resource_aws_dynamodb_table_test.go b/builtin/providers/aws/resource_aws_dynamodb_table_test.go index c2d6f33ff..fe2ce175f 100644 --- a/builtin/providers/aws/resource_aws_dynamodb_table_test.go +++ b/builtin/providers/aws/resource_aws_dynamodb_table_test.go @@ -16,7 +16,7 @@ import ( func TestAccAWSDynamoDbTable_basic(t *testing.T) { var conf dynamodb.DescribeTableOutput - rName := acctest.RandomWithPrefix("TerraformTestTable-") + rName := acctest.RandomWithPrefix("TerraformTestTable-") resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -24,14 +24,14 @@ func TestAccAWSDynamoDbTable_basic(t *testing.T) { CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSDynamoDbConfigInitialState(rName), + Config: testAccAWSDynamoDbConfigInitialState(rName), Check: resource.ComposeTestCheckFunc( testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table", &conf), testAccCheckInitialAWSDynamoDbTableConf("aws_dynamodb_table.basic-dynamodb-table"), ), }, { - Config: testAccAWSDynamoDbConfigAddSecondaryGSI(rName), + Config: testAccAWSDynamoDbConfigAddSecondaryGSI(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDynamoDbTableWasUpdated("aws_dynamodb_table.basic-dynamodb-table"), ), @@ -413,7 +413,7 @@ resource "aws_dynamodb_table" "basic-dynamodb-table" { } func testAccAWSDynamoDbConfigAddSecondaryGSI(rName string) string { - return fmt.Sprintf(` + return fmt.Sprintf(` resource "aws_dynamodb_table" "basic-dynamodb-table" { name = "%s" read_capacity = 20 From 
25cbbdea8a9ff693f0da4c72c9220fcd3c988440 Mon Sep 17 00:00:00 2001 From: Mathieu Herbert Date: Thu, 13 Apr 2017 22:25:29 +0200 Subject: [PATCH 118/342] provider/google: datasource subnetwork and network (#12442) * first version of this datasource * add network and subnetwork datasource and documentation * modify sidebar reference in documentation * fix elements after review on network and subnetwork datasources * fix fmt on Google provider.go * modify code with the review * modify documentation layout order * fix alphabetic order in provider.go * fix rebase issue and documentation datasource => data --- .../data_source_google_compute_network.go | 73 ++++++++++++++++ ...data_source_google_compute_network_test.go | 68 +++++++++++++++ .../data_source_google_compute_subnetwork.go | 87 +++++++++++++++++++ ...a_source_google_compute_subnetwork_test.go | 81 +++++++++++++++++ builtin/providers/google/provider.go | 6 +- .../datasource_compute_network.html.markdown | 46 ++++++++++ ...atasource_compute_subnetwork.html.markdown | 50 +++++++++++ website/source/layouts/google.erb | 6 ++ 8 files changed, 415 insertions(+), 2 deletions(-) create mode 100644 builtin/providers/google/data_source_google_compute_network.go create mode 100644 builtin/providers/google/data_source_google_compute_network_test.go create mode 100644 builtin/providers/google/data_source_google_compute_subnetwork.go create mode 100644 builtin/providers/google/data_source_google_compute_subnetwork_test.go create mode 100644 website/source/docs/providers/google/d/datasource_compute_network.html.markdown create mode 100644 website/source/docs/providers/google/d/datasource_compute_subnetwork.html.markdown diff --git a/builtin/providers/google/data_source_google_compute_network.go b/builtin/providers/google/data_source_google_compute_network.go new file mode 100644 index 000000000..b22d2b257 --- /dev/null +++ b/builtin/providers/google/data_source_google_compute_network.go @@ -0,0 +1,73 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" +) + +func dataSourceGoogleComputeNetwork() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeNetworkRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "gateway_ipv4": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "subnetworks_self_links": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceGoogleComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + network, err := config.clientCompute.Networks.Get( + project, d.Get("name").(string)).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + + return fmt.Errorf("Network Not Found : %s", d.Get("name")) + } + + return fmt.Errorf("Error reading network: %s", err) + } + d.Set("gateway_ipv4", network.GatewayIPv4) + d.Set("self_link", network.SelfLink) + d.Set("description", network.Description) + d.Set("subnetworks_self_links", network.Subnetworks) + 
d.SetId(network.Name) + return nil +} diff --git a/builtin/providers/google/data_source_google_compute_network_test.go b/builtin/providers/google/data_source_google_compute_network_test.go new file mode 100644 index 000000000..bbf70af67 --- /dev/null +++ b/builtin/providers/google/data_source_google_compute_network_test.go @@ -0,0 +1,68 @@ +package google + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "testing" +) + +func TestAccDataSourceGoogleNetwork(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: TestAccDataSourceGoogleNetworkConfig, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceGoogleNetworkCheck("data.google_compute_network.my_network", "google_compute_network.foobar"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleNetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources[data_source_name] + if !ok { + return fmt.Errorf("root module has no resource called %s", data_source_name) + } + + rs, ok := s.RootModule().Resources[resource_name] + if !ok { + return fmt.Errorf("can't find %s in state", resource_name) + } + + ds_attr := ds.Primary.Attributes + rs_attr := rs.Primary.Attributes + network_attrs_to_test := []string{ + "id", + "self_link", + "name", + "description", + } + + for _, attr_to_check := range network_attrs_to_test { + if ds_attr[attr_to_check] != rs_attr[attr_to_check] { + return fmt.Errorf( + "%s is %s; want %s", + attr_to_check, + ds_attr[attr_to_check], + rs_attr[attr_to_check], + ) + } + } + return nil + } +} + +var TestAccDataSourceGoogleNetworkConfig = ` +resource "google_compute_network" "foobar" { + name = "network-test" + description = "my-description" +} + +data "google_compute_network" "my_network" { + name = "${google_compute_network.foobar.name}" +}` diff --git a/builtin/providers/google/data_source_google_compute_subnetwork.go b/builtin/providers/google/data_source_google_compute_subnetwork.go new file mode 100644 index 000000000..bff489ba3 --- /dev/null +++ b/builtin/providers/google/data_source_google_compute_subnetwork.go @@ -0,0 +1,87 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" +) + +func dataSourceGoogleComputeSubnetwork() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeSubnetworkRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "ip_cidr_range": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "network": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "gateway_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + region, err := getRegion(d, config) + if 
err != nil { + return err + } + + subnetwork, err := config.clientCompute.Subnetworks.Get( + project, region, d.Get("name").(string)).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + + return fmt.Errorf("Subnetwork Not Found") + } + + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + + d.Set("ip_cidr_range", subnetwork.IpCidrRange) + d.Set("self_link", subnetwork.SelfLink) + d.Set("description", subnetwork.Description) + d.Set("gateway_address", subnetwork.GatewayAddress) + d.Set("network", subnetwork.Network) + + //Subnet id creation is defined in resource_compute_subnetwork.go + subnetwork.Region = region + d.SetId(createSubnetID(subnetwork)) + return nil +} diff --git a/builtin/providers/google/data_source_google_compute_subnetwork_test.go b/builtin/providers/google/data_source_google_compute_subnetwork_test.go new file mode 100644 index 000000000..f3d8516da --- /dev/null +++ b/builtin/providers/google/data_source_google_compute_subnetwork_test.go @@ -0,0 +1,81 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDataSourceGoogleSubnetwork(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: TestAccDataSourceGoogleSubnetworkConfig, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceGoogleSubnetworkCheck("data.google_compute_subnetwork.my_subnetwork", "google_compute_subnetwork.foobar"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleSubnetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources[data_source_name] + if !ok { + return fmt.Errorf("root module has no resource called %s", data_source_name) + } + + rs, ok := s.RootModule().Resources[resource_name] + if !ok { + return fmt.Errorf("can't find %s in state", resource_name) + } + + ds_attr := ds.Primary.Attributes + rs_attr := rs.Primary.Attributes + + subnetwork_attrs_to_test := []string{ + "id", + "self_link", + "name", + "description", + "ip_cidr_range", + "network", + } + + for _, attr_to_check := range subnetwork_attrs_to_test { + if ds_attr[attr_to_check] != rs_attr[attr_to_check] { + return fmt.Errorf( + "%s is %s; want %s", + attr_to_check, + ds_attr[attr_to_check], + rs_attr[attr_to_check], + ) + } + } + + return nil + } +} + +var TestAccDataSourceGoogleSubnetworkConfig = ` + +resource "google_compute_network" "foobar" { + name = "network-test" + description = "my-description" +} +resource "google_compute_subnetwork" "foobar" { + name = "subnetwork-test" + description = "my-description" + ip_cidr_range = "10.0.0.0/24" + network = "${google_compute_network.foobar.self_link}" +} + +data "google_compute_subnetwork" "my_subnetwork" { + name = "${google_compute_subnetwork.foobar.name}" +} +` diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index 7562609c3..f302e00ca 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -48,8 +48,10 @@ func Provider() terraform.ResourceProvider { }, DataSourcesMap: map[string]*schema.Resource{ - "google_iam_policy": dataSourceGoogleIamPolicy(), - "google_compute_zones": dataSourceGoogleComputeZones(), + "google_compute_network": dataSourceGoogleComputeNetwork(), 
+ "google_compute_subnetwork": dataSourceGoogleComputeSubnetwork(), + "google_compute_zones": dataSourceGoogleComputeZones(), + "google_iam_policy": dataSourceGoogleIamPolicy(), }, ResourcesMap: map[string]*schema.Resource{ diff --git a/website/source/docs/providers/google/d/datasource_compute_network.html.markdown b/website/source/docs/providers/google/d/datasource_compute_network.html.markdown new file mode 100644 index 000000000..8e09f33c3 --- /dev/null +++ b/website/source/docs/providers/google/d/datasource_compute_network.html.markdown @@ -0,0 +1,46 @@ +--- +layout: "google" +page_title: "Google: google_compute_network" +sidebar_current: "docs-google-datasource-compute-network" +description: |- + Get a network within GCE. +--- + +# google\_compute\_network + +Get a network within GCE from its name. + +## Example Usage + +```tf +data "google_compute_network" "my-network" { + name = "default-us-east1" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the network. + + +- - - + +* `project` - (Optional) The project in which the resource belongs. If it + is not provided, the provider project is used. + +## Attributes Reference + +In addition to the arguments listed above, the following attributes are exported: + +* `network` - The network name or resource link to the parent + network of this network. + +* `description` - Description of this network. + +* `gateway_ipv4` - The IP address of the gateway. + +* `subnetworks_self_links` - the list of subnetworks which belong to the network + +* `self_link` - The URI of the resource. diff --git a/website/source/docs/providers/google/d/datasource_compute_subnetwork.html.markdown b/website/source/docs/providers/google/d/datasource_compute_subnetwork.html.markdown new file mode 100644 index 000000000..faf6b99f9 --- /dev/null +++ b/website/source/docs/providers/google/d/datasource_compute_subnetwork.html.markdown @@ -0,0 +1,50 @@ +--- +layout: "google" +page_title: "Google: google_compute_subnetwork" +sidebar_current: "docs-google-datasource-compute-subnetwork" +description: |- + Get a subnetwork within GCE. +--- + +# google\_compute\_subnetwork + +Get a subnetwork within GCE from its name and region. + +## Example Usage + +```tf +data "google_compute_subnetwork" "my-subnetwork" { + name = "default-us-east1" + region = "us-east1" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - The name of the subnetwork. + +- - - + +* `project` - (Optional) The project in which the resource belongs. If it + is not provided, the provider project is used. + +* `region` - (Optional) The region this subnetwork has been created in. If + unspecified, this defaults to the region configured in the provider. + +## Attributes Reference + +In addition to the arguments listed above, the following attributes are exported: + +* `network` - The network name or resource link to the parent + network of this subnetwork. + +* `description` - Description of this subnetwork. + +* `ip_cidr_range` - The IP address range that machines in this + network are assigned to, represented as a CIDR block. + +* `gateway_address` - The IP address of the gateway. + +* `self_link` - The URI of the created resource. 
diff --git a/website/source/layouts/google.erb b/website/source/layouts/google.erb index efc3a7455..b615b13bc 100644 --- a/website/source/layouts/google.erb +++ b/website/source/layouts/google.erb @@ -13,6 +13,12 @@ [ERB/HTML hunk body lost in rendering; the six inserted lines add a "Google Cloud Platform Data Sources" sidebar section linking the new google_compute_network and google_compute_subnetwork pages] diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index 2e517749c..d4fea022d 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -75,6 +75,7 @@ [ERB/HTML hunk body lost in rendering; the site navigation (Intro, Docs, Community, and the inline-SVG "download.svg" Download link) gains one inserted "Enterprise" entry]
From 586593cc16d34157ea1ef75dac2d21fcffec1389 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Fri, 14 Apr 2017 12:16:59 -0400 Subject: [PATCH 128/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d6294a74..3b5d43a9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ IMPROVEMENTS: BUG FIXES: * core: Add the close provider/provisioner transformers back [GH-13102] + * provider/aws: Fix DB Parameter Group Name [GH-13279] * provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650] * provider/google: Stop setting the id when project creation fails [GH-13644] * provider/openstack: Fix updating Ports [GH-13604] From 41baf7d811f0a7cbc77f4254337005ca199b6f24 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Fri, 14 Apr 2017 12:54:38 -0500 Subject: [PATCH 129/342] provider/aws: Wait for snapshotting in delete of ElastiCache cluster --- builtin/providers/aws/resource_aws_elasticache_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 1bd24d361..03e6f8489 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -572,7 +572,7 @@ func resourceAwsElasticacheClusterDelete(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Waiting for deletion: %v", d.Id()) stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed"}, + Pending: []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed", "snapshotting"}, Target: []string{}, Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "", []string{}), Timeout: 40 * time.Minute, From 7b32ad2b9931c5b6cf3a26fe32fefb49a8780ad4 Mon Sep 17 00:00:00 2001 From: Roman Laguta Date: Fri, 14 Apr 2017 21:09:37 +0300 Subject: [PATCH 130/342] Update cloudwatch_log_subscription_filter.html.markdown (#13659) CloudWatch log subscriptions support Lambda as a destination, and the `aws_cloudwatch_log_subscription_filter` resource can create subscriptions that deliver to Lambda, but doing so needs some additional steps. I described them in the argument descriptions; feel free to improve the wording if you can say the same better. This change will help readers better understand what this resource can do. --- .../aws/r/cloudwatch_log_subscription_filter.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_subscription_filter.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_subscription_filter.html.markdown index 1e5aa2a15..882654e07 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_log_subscription_filter.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_log_subscription_filter.html.markdown @@ -27,10 +27,10 @@ resource "aws_cloudwatch_log_subscription_filter" "test_lambdafunction_logfilter The following arguments are supported: * `name` - (Required) A name for the subscription filter -* `destination_arn` - (Required) The ARN of the destination to deliver matching log events to. 
Currently only Kinesis stream / a logical destination +* `destination_arn` - (Required) The ARN of the destination to deliver matching log events to. Kinesis stream or Lambda function ARN. * `filter_pattern` - (Required) A valid CloudWatch Logs filter pattern for subscribing to a filtered stream of log events. * `log_group_name` - (Required) The name of the log group to associate the subscription filter with -* `role_arn` - (Optional) The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to deliver ingested log events to the destination stream +* `role_arn` - (Optional) The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to deliver ingested log events to the destination. If you use Lambda as a destination, you should skip this argument and use the `aws_lambda_permission` resource to grant CloudWatch Logs access to the destination Lambda function. ## Attributes Reference From 928e60672f4eca77dc1d0734379d9f103c13ca69 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 14 Apr 2017 10:53:08 -0400 Subject: [PATCH 131/342] context Refresh and Apply sometimes return nil The documentation for Refresh indicates that it will always return a valid state, but that wasn't true in the case of a graph builder error. While this same concept wasn't documented for Apply, it was still assumed in the terraform apply code. Since the helper testing framework relies on the absence of a state to determine if it can call Destroy, the Context can't start returning a state in all cases. Document this, and use the State method to fetch the correct state value after Apply. Add a nil check to the WriteState function, so that writing a nil state is a noop. Make sure to init before sorting the state, so that we're not attempting to sort nil values. This isn't technically needed with the current code, but it's just safer in general. --- backend/local/backend_apply.go | 4 +++- terraform/context.go | 15 ++++++++++++--- terraform/state.go | 21 +++++++++++++-------- 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/backend/local/backend_apply.go b/backend/local/backend_apply.go index d7bf534a1..8fec2019e 100644 --- a/backend/local/backend_apply.go +++ b/backend/local/backend_apply.go @@ -102,7 +102,9 @@ func (b *Local) opApply( doneCh := make(chan struct{}) go func() { defer close(doneCh) - applyState, applyErr = tfCtx.Apply() + _, applyErr = tfCtx.Apply() + // we always want the state, even if apply failed + applyState = tfCtx.State() /* // Record any shadow errors for later diff --git a/terraform/context.go b/terraform/context.go index 15528beed..306128edf 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -453,8 +453,17 @@ func (c *Context) Input(mode InputMode) error { // Apply applies the changes represented by this context and returns // the resulting state. // -// In addition to returning the resulting state, this context is updated -// with the latest state. +// Even in the case an error is returned, the state may be returned and will +// potentially be partially updated. In addition to returning the resulting +// state, this context is updated with the latest state. +// +// If the state is required after an error, the caller should call +// Context.State, rather than rely on the return value. +// +// TODO: Apply and Refresh should either always return a state, or rely on the +// State() method. 
Currently the helper/resource testing framework relies +// on the absence of a returned state to determine if Destroy can be +// called, so that will need to be refactored before this can be changed. func (c *Context) Apply() (*State, error) { defer c.acquireRun("apply")() @@ -580,7 +589,7 @@ func (c *Context) Plan() (*Plan, error) { // to their latest state. This will update the state that this context // works with, along with returning it. // -// Even in the case an error is returned, the state will be returned and +// Even in the case an error is returned, the state may be returned and // will potentially be partially updated. func (c *Context) Refresh() (*State, error) { defer c.acquireRun("refresh")() diff --git a/terraform/state.go b/terraform/state.go index 78725e88d..84d4c2669 100644 --- a/terraform/state.go +++ b/terraform/state.go @@ -1960,12 +1960,12 @@ func ReadStateV2(jsonBytes []byte) (*State, error) { } } - // Sort it - state.sort() - // catch any unitialized fields in the state state.init() + // Sort it + state.sort() + return state, nil } @@ -1995,12 +1995,12 @@ func ReadStateV3(jsonBytes []byte) (*State, error) { } } - // Sort it - state.sort() - // catch any unitialized fields in the state state.init() + // Sort it + state.sort() + // Now we write the state back out to detect any changes in normaliztion. // If our state is now written out differently, bump the serial number to // prevent conflicts. @@ -2020,12 +2020,17 @@ func ReadStateV3(jsonBytes []byte) (*State, error) { // WriteState writes a state somewhere in a binary format. func WriteState(d *State, dst io.Writer) error { - // Make sure it is sorted - d.sort() + // writing a nil state is a noop. + if d == nil { + return nil + } // make sure we have no uninitialized fields d.init() + // Make sure it is sorted + d.sort() + // Ensure the version is set d.Version = StateVersion From 046cc9b9aa3924e77467bd1276353d2071510150 Mon Sep 17 00:00:00 2001 From: Justin DiPierro Date: Fri, 14 Apr 2017 14:58:44 -0400 Subject: [PATCH 132/342] Google Addresses: Set name field on read --- builtin/providers/google/resource_compute_address.go | 6 ++---- builtin/providers/google/resource_compute_global_address.go | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/builtin/providers/google/resource_compute_address.go b/builtin/providers/google/resource_compute_address.go index 54a60cc0a..27b4c1805 100644 --- a/builtin/providers/google/resource_compute_address.go +++ b/builtin/providers/google/resource_compute_address.go @@ -15,10 +15,7 @@ func resourceComputeAddress() *schema.Resource { Read: resourceComputeAddressRead, Delete: resourceComputeAddressDelete, Importer: &schema.ResourceImporter{ - State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("name", d.Id()) - return []*schema.ResourceData{d}, nil - }, + State: schema.ImportStatePassthrough, }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -113,6 +110,7 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error d.Set("address", addr.Address) d.Set("self_link", addr.SelfLink) + d.Set("name", addr.Name) return nil } diff --git a/builtin/providers/google/resource_compute_global_address.go b/builtin/providers/google/resource_compute_global_address.go index 7f4df04a4..bf6a6a6d6 100644 --- a/builtin/providers/google/resource_compute_global_address.go +++ b/builtin/providers/google/resource_compute_global_address.go @@ -15,10 +15,7 @@ func resourceComputeGlobalAddress() 
*schema.Resource { Read: resourceComputeGlobalAddressRead, Delete: resourceComputeGlobalAddressDelete, Importer: &schema.ResourceImporter{ - State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("name", d.Id()) - return []*schema.ResourceData{d}, nil - }, + State: schema.ImportStatePassthrough, }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -97,6 +94,7 @@ func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) d.Set("address", addr.Address) d.Set("self_link", addr.SelfLink) + d.Set("name", addr.Name) return nil } From 46809cc2cc5a57afb29cb76821e127fbcf3bceb6 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Fri, 14 Apr 2017 14:13:09 -0500 Subject: [PATCH 133/342] provider/aws: randomize ECS name --- .../aws/resource_aws_ecs_service_test.go | 113 ++++++++---------- 1 file changed, 52 insertions(+), 61 deletions(-) diff --git a/builtin/providers/aws/resource_aws_ecs_service_test.go b/builtin/providers/aws/resource_aws_ecs_service_test.go index 6440393ba..53d38ccbd 100644 --- a/builtin/providers/aws/resource_aws_ecs_service_test.go +++ b/builtin/providers/aws/resource_aws_ecs_service_test.go @@ -108,20 +108,21 @@ func TestAccAWSEcsServiceWithARN(t *testing.T) { } func TestAccAWSEcsServiceWithFamilyAndRevision(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-test") resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithFamilyAndRevision, + Config: testAccAWSEcsServiceWithFamilyAndRevision(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.jenkins"), ), }, { - Config: testAccAWSEcsServiceWithFamilyAndRevisionModified, + Config: testAccAWSEcsServiceWithFamilyAndRevisionModified(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.jenkins"), ), @@ -243,14 +244,13 @@ func TestAccAWSEcsService_withEcsClusterName(t *testing.T) { } func TestAccAWSEcsService_withAlb(t *testing.T) { - rString := acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithAlb(rString), + Config: testAccAWSEcsServiceWithAlb, Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.with_alb"), ), @@ -301,14 +301,13 @@ func TestAccAWSEcsServiceWithPlacementConstraints(t *testing.T) { } func TestAccAWSEcsServiceWithPlacementConstraints_emptyExpression(t *testing.T) { - rInt := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSEcsServiceWithPlacementConstraintEmptyExpression(rInt), + Config: testAccAWSEcsServiceWithPlacementConstraintEmptyExpression, Check: resource.ComposeTestCheckFunc( testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), resource.TestCheckResourceAttr("aws_ecs_service.mongo", "placement_constraints.#", "1"), @@ -485,36 +484,34 @@ resource "aws_ecs_service" "mongo" { } ` -func testAccAWSEcsServiceWithPlacementConstraintEmptyExpression(rInt int) string { - return fmt.Sprintf(` - resource "aws_ecs_cluster" "default" { - name = "terraformecstest%d" - } - resource "aws_ecs_task_definition" 
"mongo" { - family = "mongodb" - container_definitions = < Date: Fri, 14 Apr 2017 21:12:04 +0100 Subject: [PATCH 134/342] provider/aws: Use mutex & retry for WAF change operations (#13656) --- .../aws/resource_aws_waf_byte_match_set.go | 91 +++++++--------- .../resource_aws_waf_byte_match_set_test.go | 62 +++++------ .../providers/aws/resource_aws_waf_ipset.go | 89 +++++++-------- .../aws/resource_aws_waf_ipset_test.go | 59 +++++----- .../providers/aws/resource_aws_waf_rule.go | 91 +++++++--------- .../aws/resource_aws_waf_rule_test.go | 61 +++++------ .../resource_aws_waf_size_constraint_set.go | 90 +++++++--------- ...source_aws_waf_size_constraint_set_test.go | 60 +++++------ ...esource_aws_waf_sql_injection_match_set.go | 86 +++++++-------- ...ce_aws_waf_sql_injection_match_set_test.go | 58 +++++----- .../providers/aws/resource_aws_waf_web_acl.go | 102 ++++++++---------- .../aws/resource_aws_waf_web_acl_test.go | 61 +++++------ .../aws/resource_aws_waf_xss_match_set.go | 85 +++++++-------- .../resource_aws_waf_xss_match_set_test.go | 58 +++++----- builtin/providers/aws/waf_token_handlers.go | 49 +++++++++ 15 files changed, 512 insertions(+), 590 deletions(-) create mode 100644 builtin/providers/aws/waf_token_handlers.go diff --git a/builtin/providers/aws/resource_aws_waf_byte_match_set.go b/builtin/providers/aws/resource_aws_waf_byte_match_set.go index b3a02b7d0..c28359351 100644 --- a/builtin/providers/aws/resource_aws_waf_byte_match_set.go +++ b/builtin/providers/aws/resource_aws_waf_byte_match_set.go @@ -69,24 +69,18 @@ func resourceAwsWafByteMatchSetCreate(d *schema.ResourceData, meta interface{}) log.Printf("[INFO] Creating ByteMatchSet: %s", d.Get("name").(string)) - // ChangeToken - var ct *waf.GetChangeTokenInput - - res, err := conn.GetChangeToken(ct) - if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) - } - - params := &waf.CreateByteMatchSetInput{ - ChangeToken: res.ChangeToken, - Name: aws.String(d.Get("name").(string)), - } - - resp, err := conn.CreateByteMatchSet(params) - + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateByteMatchSetInput{ + ChangeToken: token, + Name: aws.String(d.Get("name").(string)), + } + return conn.CreateByteMatchSet(params) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error creating ByteMatchSet: {{err}}", err) } + resp := out.(*waf.CreateByteMatchSetOutput) d.SetId(*resp.ByteMatchSet.ByteMatchSetId) @@ -134,17 +128,14 @@ func resourceAwsWafByteMatchSetDelete(d *schema.ResourceData, meta interface{}) return errwrap.Wrapf("[ERROR] Error deleting ByteMatchSet: {{err}}", err) } - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - - req := &waf.DeleteByteMatchSetInput{ - ChangeToken: resp.ChangeToken, - ByteMatchSetId: aws.String(d.Id()), - } - - _, err = conn.DeleteByteMatchSet(req) - + wr := newWafRetryer(conn, "global") + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteByteMatchSetInput{ + ChangeToken: token, + ByteMatchSetId: aws.String(d.Id()), + } + return conn.DeleteByteMatchSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error deleting ByteMatchSet: {{err}}", err) } @@ -155,34 +146,30 @@ func resourceAwsWafByteMatchSetDelete(d *schema.ResourceData, meta interface{}) func updateByteMatchSetResource(d *schema.ResourceData, meta interface{}, ChangeAction string) error { conn := meta.(*AWSClient).wafconn - var ct *waf.GetChangeTokenInput - 
- resp, err := conn.GetChangeToken(ct) - if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) - } - - req := &waf.UpdateByteMatchSetInput{ - ChangeToken: resp.ChangeToken, - ByteMatchSetId: aws.String(d.Id()), - } - - ByteMatchTuples := d.Get("byte_match_tuples").(*schema.Set) - for _, ByteMatchTuple := range ByteMatchTuples.List() { - ByteMatch := ByteMatchTuple.(map[string]interface{}) - ByteMatchUpdate := &waf.ByteMatchSetUpdate{ - Action: aws.String(ChangeAction), - ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: expandFieldToMatch(ByteMatch["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - PositionalConstraint: aws.String(ByteMatch["positional_constraint"].(string)), - TargetString: []byte(ByteMatch["target_string"].(string)), - TextTransformation: aws.String(ByteMatch["text_transformation"].(string)), - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateByteMatchSetInput{ + ChangeToken: token, + ByteMatchSetId: aws.String(d.Id()), } - req.Updates = append(req.Updates, ByteMatchUpdate) - } - _, err = conn.UpdateByteMatchSet(req) + ByteMatchTuples := d.Get("byte_match_tuples").(*schema.Set) + for _, ByteMatchTuple := range ByteMatchTuples.List() { + ByteMatch := ByteMatchTuple.(map[string]interface{}) + ByteMatchUpdate := &waf.ByteMatchSetUpdate{ + Action: aws.String(ChangeAction), + ByteMatchTuple: &waf.ByteMatchTuple{ + FieldToMatch: expandFieldToMatch(ByteMatch["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + PositionalConstraint: aws.String(ByteMatch["positional_constraint"].(string)), + TargetString: []byte(ByteMatch["target_string"].(string)), + TextTransformation: aws.String(ByteMatch["text_transformation"].(string)), + }, + } + req.Updates = append(req.Updates, ByteMatchUpdate) + } + + return conn.UpdateByteMatchSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err) } diff --git a/builtin/providers/aws/resource_aws_waf_byte_match_set_test.go b/builtin/providers/aws/resource_aws_waf_byte_match_set_test.go index ae9dae099..c5e0ce213 100644 --- a/builtin/providers/aws/resource_aws_waf_byte_match_set_test.go +++ b/builtin/providers/aws/resource_aws_waf_byte_match_set_test.go @@ -96,49 +96,43 @@ func testAccCheckAWSWafByteMatchSetDisappears(v *waf.ByteMatchSet) resource.Test return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).wafconn - // ChangeToken - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - req := &waf.UpdateByteMatchSetInput{ - ChangeToken: resp.ChangeToken, - ByteMatchSetId: v.ByteMatchSetId, - } - - for _, ByteMatchTuple := range v.ByteMatchTuples { - ByteMatchUpdate := &waf.ByteMatchSetUpdate{ - Action: aws.String("DELETE"), - ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: ByteMatchTuple.FieldToMatch, - PositionalConstraint: ByteMatchTuple.PositionalConstraint, - TargetString: ByteMatchTuple.TargetString, - TextTransformation: ByteMatchTuple.TextTransformation, - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateByteMatchSetInput{ + ChangeToken: token, + ByteMatchSetId: v.ByteMatchSetId, } - req.Updates = append(req.Updates, ByteMatchUpdate) - } - _, err = conn.UpdateByteMatchSet(req) + for _, ByteMatchTuple := range 
v.ByteMatchTuples { + ByteMatchUpdate := &waf.ByteMatchSetUpdate{ + Action: aws.String("DELETE"), + ByteMatchTuple: &waf.ByteMatchTuple{ + FieldToMatch: ByteMatchTuple.FieldToMatch, + PositionalConstraint: ByteMatchTuple.PositionalConstraint, + TargetString: ByteMatchTuple.TargetString, + TextTransformation: ByteMatchTuple.TextTransformation, + }, + } + req.Updates = append(req.Updates, ByteMatchUpdate) + } + + return conn.UpdateByteMatchSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err) } - resp, err = conn.GetChangeToken(ct) + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + opts := &waf.DeleteByteMatchSetInput{ + ChangeToken: token, + ByteMatchSetId: v.ByteMatchSetId, + } + return conn.DeleteByteMatchSet(opts) + }) if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) + return errwrap.Wrapf("[ERROR] Error deleting ByteMatchSet: {{err}}", err) } - opts := &waf.DeleteByteMatchSetInput{ - ChangeToken: resp.ChangeToken, - ByteMatchSetId: v.ByteMatchSetId, - } - if _, err := conn.DeleteByteMatchSet(opts); err != nil { - return err - } return nil } } diff --git a/builtin/providers/aws/resource_aws_waf_ipset.go b/builtin/providers/aws/resource_aws_waf_ipset.go index 4b07f6419..426508db4 100644 --- a/builtin/providers/aws/resource_aws_waf_ipset.go +++ b/builtin/providers/aws/resource_aws_waf_ipset.go @@ -46,23 +46,18 @@ func resourceAwsWafIPSet() *schema.Resource { func resourceAwsWafIPSetCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).wafconn - // ChangeToken - var ct *waf.GetChangeTokenInput - - res, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - params := &waf.CreateIPSetInput{ - ChangeToken: res.ChangeToken, - Name: aws.String(d.Get("name").(string)), - } - - resp, err := conn.CreateIPSet(params) + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateIPSetInput{ + ChangeToken: token, + Name: aws.String(d.Get("name").(string)), + } + return conn.CreateIPSet(params) + }) if err != nil { return err } + resp := out.(*waf.CreateIPSetOutput) d.SetId(*resp.IPSet.IPSetId) return resourceAwsWafIPSetUpdate(d, meta) } @@ -117,18 +112,15 @@ func resourceAwsWafIPSetDelete(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error Removing IPSetDescriptors: %s", err) } - // ChangeToken - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - - req := &waf.DeleteIPSetInput{ - ChangeToken: resp.ChangeToken, - IPSetId: aws.String(d.Id()), - } - log.Printf("[INFO] Deleting WAF IPSet") - _, err = conn.DeleteIPSet(req) - + wr := newWafRetryer(conn, "global") + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteIPSetInput{ + ChangeToken: token, + IPSetId: aws.String(d.Id()), + } + log.Printf("[INFO] Deleting WAF IPSet") + return conn.DeleteIPSet(req) + }) if err != nil { return fmt.Errorf("Error Deleting WAF IPSet: %s", err) } @@ -139,33 +131,28 @@ func resourceAwsWafIPSetDelete(d *schema.ResourceData, meta interface{}) error { func updateIPSetResource(d *schema.ResourceData, meta interface{}, ChangeAction string) error { conn := meta.(*AWSClient).wafconn - // ChangeToken - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - req := 
&waf.UpdateIPSetInput{ - ChangeToken: resp.ChangeToken, - IPSetId: aws.String(d.Id()), - } - - IPSetDescriptors := d.Get("ip_set_descriptors").(*schema.Set) - for _, IPSetDescriptor := range IPSetDescriptors.List() { - IPSet := IPSetDescriptor.(map[string]interface{}) - IPSetUpdate := &waf.IPSetUpdate{ - Action: aws.String(ChangeAction), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String(IPSet["type"].(string)), - Value: aws.String(IPSet["value"].(string)), - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateIPSetInput{ + ChangeToken: token, + IPSetId: aws.String(d.Id()), } - req.Updates = append(req.Updates, IPSetUpdate) - } - _, err = conn.UpdateIPSet(req) + IPSetDescriptors := d.Get("ip_set_descriptors").(*schema.Set) + for _, IPSetDescriptor := range IPSetDescriptors.List() { + IPSet := IPSetDescriptor.(map[string]interface{}) + IPSetUpdate := &waf.IPSetUpdate{ + Action: aws.String(ChangeAction), + IPSetDescriptor: &waf.IPSetDescriptor{ + Type: aws.String(IPSet["type"].(string)), + Value: aws.String(IPSet["value"].(string)), + }, + } + req.Updates = append(req.Updates, IPSetUpdate) + } + + return conn.UpdateIPSet(req) + }) if err != nil { return fmt.Errorf("Error Updating WAF IPSet: %s", err) } diff --git a/builtin/providers/aws/resource_aws_waf_ipset_test.go b/builtin/providers/aws/resource_aws_waf_ipset_test.go index ffb4d6cb0..3db32dc44 100644 --- a/builtin/providers/aws/resource_aws_waf_ipset_test.go +++ b/builtin/providers/aws/resource_aws_waf_ipset_test.go @@ -100,46 +100,39 @@ func testAccCheckAWSWafIPSetDisappears(v *waf.IPSet) resource.TestCheckFunc { return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).wafconn - // ChangeToken - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - req := &waf.UpdateIPSetInput{ - ChangeToken: resp.ChangeToken, - IPSetId: v.IPSetId, - } - - for _, IPSetDescriptor := range v.IPSetDescriptors { - IPSetUpdate := &waf.IPSetUpdate{ - Action: aws.String("DELETE"), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: IPSetDescriptor.Type, - Value: IPSetDescriptor.Value, - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateIPSetInput{ + ChangeToken: token, + IPSetId: v.IPSetId, } - req.Updates = append(req.Updates, IPSetUpdate) - } - _, err = conn.UpdateIPSet(req) + for _, IPSetDescriptor := range v.IPSetDescriptors { + IPSetUpdate := &waf.IPSetUpdate{ + Action: aws.String("DELETE"), + IPSetDescriptor: &waf.IPSetDescriptor{ + Type: IPSetDescriptor.Type, + Value: IPSetDescriptor.Value, + }, + } + req.Updates = append(req.Updates, IPSetUpdate) + } + + return conn.UpdateIPSet(req) + }) if err != nil { return fmt.Errorf("Error Updating WAF IPSet: %s", err) } - resp, err = conn.GetChangeToken(ct) + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + opts := &waf.DeleteIPSetInput{ + ChangeToken: token, + IPSetId: v.IPSetId, + } + return conn.DeleteIPSet(opts) + }) if err != nil { - return fmt.Errorf("Error getting change token for waf IPSet: %s", err) - } - - opts := &waf.DeleteIPSetInput{ - ChangeToken: resp.ChangeToken, - IPSetId: v.IPSetId, - } - if _, err := conn.DeleteIPSet(opts); err != nil { - return err + return fmt.Errorf("Error Deleting WAF IPSet: %s", err) } return nil } diff --git 
a/builtin/providers/aws/resource_aws_waf_rule.go b/builtin/providers/aws/resource_aws_waf_rule.go index ba59bf222..f750f6ea0 100644 --- a/builtin/providers/aws/resource_aws_waf_rule.go +++ b/builtin/providers/aws/resource_aws_waf_rule.go @@ -71,24 +71,20 @@ func resourceAwsWafRule() *schema.Resource { func resourceAwsWafRuleCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).wafconn - // ChangeToken - var ct *waf.GetChangeTokenInput + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateRuleInput{ + ChangeToken: token, + MetricName: aws.String(d.Get("metric_name").(string)), + Name: aws.String(d.Get("name").(string)), + } - res, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - params := &waf.CreateRuleInput{ - ChangeToken: res.ChangeToken, - MetricName: aws.String(d.Get("metric_name").(string)), - Name: aws.String(d.Get("name").(string)), - } - - resp, err := conn.CreateRule(params) + return conn.CreateRule(params) + }) if err != nil { return err } + resp := out.(*waf.CreateRuleOutput) d.SetId(*resp.Rule.RuleId) return resourceAwsWafRuleUpdate(d, meta) } @@ -143,18 +139,16 @@ func resourceAwsWafRuleDelete(d *schema.ResourceData, meta interface{}) error { if err != nil { return fmt.Errorf("Error Removing WAF Rule Predicates: %s", err) } - // ChangeToken - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - - req := &waf.DeleteRuleInput{ - ChangeToken: resp.ChangeToken, - RuleId: aws.String(d.Id()), - } - log.Printf("[INFO] Deleting WAF Rule") - _, err = conn.DeleteRule(req) + wr := newWafRetryer(conn, "global") + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteRuleInput{ + ChangeToken: token, + RuleId: aws.String(d.Id()), + } + log.Printf("[INFO] Deleting WAF Rule") + return conn.DeleteRule(req) + }) if err != nil { return fmt.Errorf("Error deleting WAF Rule: %s", err) } @@ -165,34 +159,29 @@ func resourceAwsWafRuleDelete(d *schema.ResourceData, meta interface{}) error { func updateWafRuleResource(d *schema.ResourceData, meta interface{}, ChangeAction string) error { conn := meta.(*AWSClient).wafconn - // ChangeToken - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - req := &waf.UpdateRuleInput{ - ChangeToken: resp.ChangeToken, - RuleId: aws.String(d.Id()), - } - - predicatesSet := d.Get("predicates").(*schema.Set) - for _, predicateI := range predicatesSet.List() { - predicate := predicateI.(map[string]interface{}) - updatePredicate := &waf.RuleUpdate{ - Action: aws.String(ChangeAction), - Predicate: &waf.Predicate{ - Negated: aws.Bool(predicate["negated"].(bool)), - Type: aws.String(predicate["type"].(string)), - DataId: aws.String(predicate["data_id"].(string)), - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateRuleInput{ + ChangeToken: token, + RuleId: aws.String(d.Id()), } - req.Updates = append(req.Updates, updatePredicate) - } - _, err = conn.UpdateRule(req) + predicatesSet := d.Get("predicates").(*schema.Set) + for _, predicateI := range predicatesSet.List() { + predicate := predicateI.(map[string]interface{}) + updatePredicate := &waf.RuleUpdate{ + Action: aws.String(ChangeAction), + Predicate: &waf.Predicate{ + Negated: 
aws.Bool(predicate["negated"].(bool)), + Type: aws.String(predicate["type"].(string)), + DataId: aws.String(predicate["data_id"].(string)), + }, + } + req.Updates = append(req.Updates, updatePredicate) + } + + return conn.UpdateRule(req) + }) if err != nil { return fmt.Errorf("Error Updating WAF Rule: %s", err) } diff --git a/builtin/providers/aws/resource_aws_waf_rule_test.go b/builtin/providers/aws/resource_aws_waf_rule_test.go index 52065b106..c8e6bafbf 100644 --- a/builtin/providers/aws/resource_aws_waf_rule_test.go +++ b/builtin/providers/aws/resource_aws_waf_rule_test.go @@ -99,47 +99,40 @@ func testAccCheckAWSWafRuleDisappears(v *waf.Rule) resource.TestCheckFunc { return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).wafconn - // ChangeToken - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - req := &waf.UpdateRuleInput{ - ChangeToken: resp.ChangeToken, - RuleId: v.RuleId, - } - - for _, Predicate := range v.Predicates { - Predicate := &waf.RuleUpdate{ - Action: aws.String("DELETE"), - Predicate: &waf.Predicate{ - Negated: Predicate.Negated, - Type: Predicate.Type, - DataId: Predicate.DataId, - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateRuleInput{ + ChangeToken: token, + RuleId: v.RuleId, } - req.Updates = append(req.Updates, Predicate) - } - _, err = conn.UpdateRule(req) + for _, Predicate := range v.Predicates { + Predicate := &waf.RuleUpdate{ + Action: aws.String("DELETE"), + Predicate: &waf.Predicate{ + Negated: Predicate.Negated, + Type: Predicate.Type, + DataId: Predicate.DataId, + }, + } + req.Updates = append(req.Updates, Predicate) + } + + return conn.UpdateRule(req) + }) if err != nil { return fmt.Errorf("Error Updating WAF Rule: %s", err) } - resp, err = conn.GetChangeToken(ct) + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + opts := &waf.DeleteRuleInput{ + ChangeToken: token, + RuleId: v.RuleId, + } + return conn.DeleteRule(opts) + }) if err != nil { - return fmt.Errorf("Error getting change token for waf Rule: %s", err) - } - - opts := &waf.DeleteRuleInput{ - ChangeToken: resp.ChangeToken, - RuleId: v.RuleId, - } - if _, err := conn.DeleteRule(opts); err != nil { - return err + return fmt.Errorf("Error Deleting WAF Rule: %s", err) } return nil } diff --git a/builtin/providers/aws/resource_aws_waf_size_constraint_set.go b/builtin/providers/aws/resource_aws_waf_size_constraint_set.go index 9f384e82c..db9d5c516 100644 --- a/builtin/providers/aws/resource_aws_waf_size_constraint_set.go +++ b/builtin/providers/aws/resource_aws_waf_size_constraint_set.go @@ -69,24 +69,19 @@ func resourceAwsWafSizeConstraintSetCreate(d *schema.ResourceData, meta interfac log.Printf("[INFO] Creating SizeConstraintSet: %s", d.Get("name").(string)) - // ChangeToken - var ct *waf.GetChangeTokenInput - - res, err := conn.GetChangeToken(ct) - if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) - } - - params := &waf.CreateSizeConstraintSetInput{ - ChangeToken: res.ChangeToken, - Name: aws.String(d.Get("name").(string)), - } - - resp, err := conn.CreateSizeConstraintSet(params) + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateSizeConstraintSetInput{ + ChangeToken: token, + Name: aws.String(d.Get("name").(string)), + } + return 
conn.CreateSizeConstraintSet(params) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error creating SizeConstraintSet: {{err}}", err) } + resp := out.(*waf.CreateSizeConstraintSetOutput) d.SetId(*resp.SizeConstraintSet.SizeConstraintSetId) @@ -134,17 +129,14 @@ func resourceAwsWafSizeConstraintSetDelete(d *schema.ResourceData, meta interfac return errwrap.Wrapf("[ERROR] Error deleting SizeConstraintSet: {{err}}", err) } - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - - req := &waf.DeleteSizeConstraintSetInput{ - ChangeToken: resp.ChangeToken, - SizeConstraintSetId: aws.String(d.Id()), - } - - _, err = conn.DeleteSizeConstraintSet(req) - + wr := newWafRetryer(conn, "global") + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteSizeConstraintSetInput{ + ChangeToken: token, + SizeConstraintSetId: aws.String(d.Id()), + } + return conn.DeleteSizeConstraintSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error deleting SizeConstraintSet: {{err}}", err) } @@ -155,34 +147,30 @@ func resourceAwsWafSizeConstraintSetDelete(d *schema.ResourceData, meta interfac func updateSizeConstraintSetResource(d *schema.ResourceData, meta interface{}, ChangeAction string) error { conn := meta.(*AWSClient).wafconn - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) - } - - req := &waf.UpdateSizeConstraintSetInput{ - ChangeToken: resp.ChangeToken, - SizeConstraintSetId: aws.String(d.Id()), - } - - sizeConstraints := d.Get("size_constraints").(*schema.Set) - for _, sizeConstraint := range sizeConstraints.List() { - sc := sizeConstraint.(map[string]interface{}) - sizeConstraintUpdate := &waf.SizeConstraintSetUpdate{ - Action: aws.String(ChangeAction), - SizeConstraint: &waf.SizeConstraint{ - FieldToMatch: expandFieldToMatch(sc["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - ComparisonOperator: aws.String(sc["comparison_operator"].(string)), - Size: aws.Int64(int64(sc["size"].(int))), - TextTransformation: aws.String(sc["text_transformation"].(string)), - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateSizeConstraintSetInput{ + ChangeToken: token, + SizeConstraintSetId: aws.String(d.Id()), } - req.Updates = append(req.Updates, sizeConstraintUpdate) - } - _, err = conn.UpdateSizeConstraintSet(req) + sizeConstraints := d.Get("size_constraints").(*schema.Set) + for _, sizeConstraint := range sizeConstraints.List() { + sc := sizeConstraint.(map[string]interface{}) + sizeConstraintUpdate := &waf.SizeConstraintSetUpdate{ + Action: aws.String(ChangeAction), + SizeConstraint: &waf.SizeConstraint{ + FieldToMatch: expandFieldToMatch(sc["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + ComparisonOperator: aws.String(sc["comparison_operator"].(string)), + Size: aws.Int64(int64(sc["size"].(int))), + TextTransformation: aws.String(sc["text_transformation"].(string)), + }, + } + req.Updates = append(req.Updates, sizeConstraintUpdate) + } + + return conn.UpdateSizeConstraintSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error updating SizeConstraintSet: {{err}}", err) } diff --git a/builtin/providers/aws/resource_aws_waf_size_constraint_set_test.go b/builtin/providers/aws/resource_aws_waf_size_constraint_set_test.go index 13eee40e4..a6bd5156e 100644 --- 
a/builtin/providers/aws/resource_aws_waf_size_constraint_set_test.go +++ b/builtin/providers/aws/resource_aws_waf_size_constraint_set_test.go @@ -96,45 +96,39 @@ func testAccCheckAWSWafSizeConstraintSetDisappears(v *waf.SizeConstraintSet) res return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).wafconn - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - req := &waf.UpdateSizeConstraintSetInput{ - ChangeToken: resp.ChangeToken, - SizeConstraintSetId: v.SizeConstraintSetId, - } - - for _, sizeConstraint := range v.SizeConstraints { - sizeConstraintUpdate := &waf.SizeConstraintSetUpdate{ - Action: aws.String("DELETE"), - SizeConstraint: &waf.SizeConstraint{ - FieldToMatch: sizeConstraint.FieldToMatch, - ComparisonOperator: sizeConstraint.ComparisonOperator, - Size: sizeConstraint.Size, - TextTransformation: sizeConstraint.TextTransformation, - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateSizeConstraintSetInput{ + ChangeToken: token, + SizeConstraintSetId: v.SizeConstraintSetId, } - req.Updates = append(req.Updates, sizeConstraintUpdate) - } - _, err = conn.UpdateSizeConstraintSet(req) + + for _, sizeConstraint := range v.SizeConstraints { + sizeConstraintUpdate := &waf.SizeConstraintSetUpdate{ + Action: aws.String("DELETE"), + SizeConstraint: &waf.SizeConstraint{ + FieldToMatch: sizeConstraint.FieldToMatch, + ComparisonOperator: sizeConstraint.ComparisonOperator, + Size: sizeConstraint.Size, + TextTransformation: sizeConstraint.TextTransformation, + }, + } + req.Updates = append(req.Updates, sizeConstraintUpdate) + } + return conn.UpdateSizeConstraintSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error updating SizeConstraintSet: {{err}}", err) } - resp, err = conn.GetChangeToken(ct) + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + opts := &waf.DeleteSizeConstraintSetInput{ + ChangeToken: token, + SizeConstraintSetId: v.SizeConstraintSetId, + } + return conn.DeleteSizeConstraintSet(opts) + }) if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) - } - - opts := &waf.DeleteSizeConstraintSetInput{ - ChangeToken: resp.ChangeToken, - SizeConstraintSetId: v.SizeConstraintSetId, - } - if _, err := conn.DeleteSizeConstraintSet(opts); err != nil { return err } return nil diff --git a/builtin/providers/aws/resource_aws_waf_sql_injection_match_set.go b/builtin/providers/aws/resource_aws_waf_sql_injection_match_set.go index 01efd6a32..c888efe5a 100644 --- a/builtin/providers/aws/resource_aws_waf_sql_injection_match_set.go +++ b/builtin/providers/aws/resource_aws_waf_sql_injection_match_set.go @@ -61,25 +61,19 @@ func resourceAwsWafSqlInjectionMatchSetCreate(d *schema.ResourceData, meta inter log.Printf("[INFO] Creating SqlInjectionMatchSet: %s", d.Get("name").(string)) - // ChangeToken - var ct *waf.GetChangeTokenInput - - res, err := conn.GetChangeToken(ct) - if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) - } - - params := &waf.CreateSqlInjectionMatchSetInput{ - ChangeToken: res.ChangeToken, - Name: aws.String(d.Get("name").(string)), - } - - resp, err := conn.CreateSqlInjectionMatchSet(params) + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateSqlInjectionMatchSetInput{ + 
ChangeToken: token, + Name: aws.String(d.Get("name").(string)), + } + return conn.CreateSqlInjectionMatchSet(params) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error creating SqlInjectionMatchSet: {{err}}", err) } - + resp := out.(*waf.CreateSqlInjectionMatchSetOutput) d.SetId(*resp.SqlInjectionMatchSet.SqlInjectionMatchSetId) return resourceAwsWafSqlInjectionMatchSetUpdate(d, meta) @@ -126,17 +120,15 @@ func resourceAwsWafSqlInjectionMatchSetDelete(d *schema.ResourceData, meta inter return errwrap.Wrapf("[ERROR] Error deleting SqlInjectionMatchSet: {{err}}", err) } - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - - req := &waf.DeleteSqlInjectionMatchSetInput{ - ChangeToken: resp.ChangeToken, - SqlInjectionMatchSetId: aws.String(d.Id()), - } - - _, err = conn.DeleteSqlInjectionMatchSet(req) + wr := newWafRetryer(conn, "global") + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteSqlInjectionMatchSetInput{ + ChangeToken: token, + SqlInjectionMatchSetId: aws.String(d.Id()), + } + return conn.DeleteSqlInjectionMatchSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error deleting SqlInjectionMatchSet: {{err}}", err) } @@ -147,32 +139,28 @@ func resourceAwsWafSqlInjectionMatchSetDelete(d *schema.ResourceData, meta inter func updateSqlInjectionMatchSetResource(d *schema.ResourceData, meta interface{}, ChangeAction string) error { conn := meta.(*AWSClient).wafconn - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) - } - - req := &waf.UpdateSqlInjectionMatchSetInput{ - ChangeToken: resp.ChangeToken, - SqlInjectionMatchSetId: aws.String(d.Id()), - } - - sqlInjectionMatchTuples := d.Get("sql_injection_match_tuples").(*schema.Set) - for _, sqlInjectionMatchTuple := range sqlInjectionMatchTuples.List() { - simt := sqlInjectionMatchTuple.(map[string]interface{}) - sizeConstraintUpdate := &waf.SqlInjectionMatchSetUpdate{ - Action: aws.String(ChangeAction), - SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ - FieldToMatch: expandFieldToMatch(simt["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - TextTransformation: aws.String(simt["text_transformation"].(string)), - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateSqlInjectionMatchSetInput{ + ChangeToken: token, + SqlInjectionMatchSetId: aws.String(d.Id()), } - req.Updates = append(req.Updates, sizeConstraintUpdate) - } - _, err = conn.UpdateSqlInjectionMatchSet(req) + sqlInjectionMatchTuples := d.Get("sql_injection_match_tuples").(*schema.Set) + for _, sqlInjectionMatchTuple := range sqlInjectionMatchTuples.List() { + simt := sqlInjectionMatchTuple.(map[string]interface{}) + sizeConstraintUpdate := &waf.SqlInjectionMatchSetUpdate{ + Action: aws.String(ChangeAction), + SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ + FieldToMatch: expandFieldToMatch(simt["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + TextTransformation: aws.String(simt["text_transformation"].(string)), + }, + } + req.Updates = append(req.Updates, sizeConstraintUpdate) + } + + return conn.UpdateSqlInjectionMatchSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error updating SqlInjectionMatchSet: {{err}}", err) } diff --git a/builtin/providers/aws/resource_aws_waf_sql_injection_match_set_test.go 
b/builtin/providers/aws/resource_aws_waf_sql_injection_match_set_test.go index f13f6711e..5ea8bca0f 100644 --- a/builtin/providers/aws/resource_aws_waf_sql_injection_match_set_test.go +++ b/builtin/providers/aws/resource_aws_waf_sql_injection_match_set_test.go @@ -96,44 +96,38 @@ func testAccCheckAWSWafSqlInjectionMatchSetDisappears(v *waf.SqlInjectionMatchSe return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).wafconn - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - req := &waf.UpdateSqlInjectionMatchSetInput{ - ChangeToken: resp.ChangeToken, - SqlInjectionMatchSetId: v.SqlInjectionMatchSetId, - } - - for _, sqlInjectionMatchTuple := range v.SqlInjectionMatchTuples { - sqlInjectionMatchTupleUpdate := &waf.SqlInjectionMatchSetUpdate{ - Action: aws.String("DELETE"), - SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ - FieldToMatch: sqlInjectionMatchTuple.FieldToMatch, - TextTransformation: sqlInjectionMatchTuple.TextTransformation, - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateSqlInjectionMatchSetInput{ + ChangeToken: token, + SqlInjectionMatchSetId: v.SqlInjectionMatchSetId, } - req.Updates = append(req.Updates, sqlInjectionMatchTupleUpdate) - } - _, err = conn.UpdateSqlInjectionMatchSet(req) + + for _, sqlInjectionMatchTuple := range v.SqlInjectionMatchTuples { + sqlInjectionMatchTupleUpdate := &waf.SqlInjectionMatchSetUpdate{ + Action: aws.String("DELETE"), + SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ + FieldToMatch: sqlInjectionMatchTuple.FieldToMatch, + TextTransformation: sqlInjectionMatchTuple.TextTransformation, + }, + } + req.Updates = append(req.Updates, sqlInjectionMatchTupleUpdate) + } + return conn.UpdateSqlInjectionMatchSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error updating SqlInjectionMatchSet: {{err}}", err) } - resp, err = conn.GetChangeToken(ct) + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + opts := &waf.DeleteSqlInjectionMatchSetInput{ + ChangeToken: token, + SqlInjectionMatchSetId: v.SqlInjectionMatchSetId, + } + return conn.DeleteSqlInjectionMatchSet(opts) + }) if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) - } - - opts := &waf.DeleteSqlInjectionMatchSetInput{ - ChangeToken: resp.ChangeToken, - SqlInjectionMatchSetId: v.SqlInjectionMatchSetId, - } - if _, err := conn.DeleteSqlInjectionMatchSet(opts); err != nil { - return err + return errwrap.Wrapf("[ERROR] Error deleting SqlInjectionMatchSet: {{err}}", err) } return nil } diff --git a/builtin/providers/aws/resource_aws_waf_web_acl.go b/builtin/providers/aws/resource_aws_waf_web_acl.go index dd3a9d1d3..a45b1cc0e 100644 --- a/builtin/providers/aws/resource_aws_waf_web_acl.go +++ b/builtin/providers/aws/resource_aws_waf_web_acl.go @@ -77,25 +77,21 @@ func resourceAwsWafWebAcl() *schema.Resource { func resourceAwsWafWebAclCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).wafconn - // ChangeToken - var ct *waf.GetChangeTokenInput + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateWebACLInput{ + ChangeToken: token, + DefaultAction: expandDefaultAction(d), + MetricName: aws.String(d.Get("metric_name").(string)), + Name: aws.String(d.Get("name").(string)), + } - res, err := 
conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - params := &waf.CreateWebACLInput{ - ChangeToken: res.ChangeToken, - DefaultAction: expandDefaultAction(d), - MetricName: aws.String(d.Get("metric_name").(string)), - Name: aws.String(d.Get("name").(string)), - } - - resp, err := conn.CreateWebACL(params) + return conn.CreateWebACL(params) + }) if err != nil { return err } + resp := out.(*waf.CreateWebACLOutput) d.SetId(*resp.WebACL.WebACLId) return resourceAwsWafWebAclUpdate(d, meta) } @@ -144,18 +140,16 @@ func resourceAwsWafWebAclDelete(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error Removing WAF ACL Rules: %s", err) } - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - - req := &waf.DeleteWebACLInput{ - ChangeToken: resp.ChangeToken, - WebACLId: aws.String(d.Id()), - } - - log.Printf("[INFO] Deleting WAF ACL") - _, err = conn.DeleteWebACL(req) + wr := newWafRetryer(conn, "global") + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteWebACLInput{ + ChangeToken: token, + WebACLId: aws.String(d.Id()), + } + log.Printf("[INFO] Deleting WAF ACL") + return conn.DeleteWebACL(req) + }) if err != nil { return fmt.Errorf("Error Deleting WAF ACL: %s", err) } @@ -164,38 +158,34 @@ func resourceAwsWafWebAclDelete(d *schema.ResourceData, meta interface{}) error func updateWebAclResource(d *schema.ResourceData, meta interface{}, ChangeAction string) error { conn := meta.(*AWSClient).wafconn - // ChangeToken - var ct *waf.GetChangeTokenInput - resp, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - req := &waf.UpdateWebACLInput{ - ChangeToken: resp.ChangeToken, - WebACLId: aws.String(d.Id()), - } - - if d.HasChange("default_action") { - req.DefaultAction = expandDefaultAction(d) - } - - rules := d.Get("rules").(*schema.Set) - for _, rule := range rules.List() { - aclRule := rule.(map[string]interface{}) - action := aclRule["action"].(*schema.Set).List()[0].(map[string]interface{}) - aclRuleUpdate := &waf.WebACLUpdate{ - Action: aws.String(ChangeAction), - ActivatedRule: &waf.ActivatedRule{ - Priority: aws.Int64(int64(aclRule["priority"].(int))), - RuleId: aws.String(aclRule["rule_id"].(string)), - Action: &waf.WafAction{Type: aws.String(action["type"].(string))}, - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateWebACLInput{ + ChangeToken: token, + WebACLId: aws.String(d.Id()), } - req.Updates = append(req.Updates, aclRuleUpdate) - } - _, err = conn.UpdateWebACL(req) + + if d.HasChange("default_action") { + req.DefaultAction = expandDefaultAction(d) + } + + rules := d.Get("rules").(*schema.Set) + for _, rule := range rules.List() { + aclRule := rule.(map[string]interface{}) + action := aclRule["action"].(*schema.Set).List()[0].(map[string]interface{}) + aclRuleUpdate := &waf.WebACLUpdate{ + Action: aws.String(ChangeAction), + ActivatedRule: &waf.ActivatedRule{ + Priority: aws.Int64(int64(aclRule["priority"].(int))), + RuleId: aws.String(aclRule["rule_id"].(string)), + Action: &waf.WafAction{Type: aws.String(action["type"].(string))}, + }, + } + req.Updates = append(req.Updates, aclRuleUpdate) + } + return conn.UpdateWebACL(req) + }) if err != nil { return fmt.Errorf("Error Updating WAF ACL: %s", err) } diff --git a/builtin/providers/aws/resource_aws_waf_web_acl_test.go 
b/builtin/providers/aws/resource_aws_waf_web_acl_test.go index 265cb15a4..6591fed0e 100644 --- a/builtin/providers/aws/resource_aws_waf_web_acl_test.go +++ b/builtin/providers/aws/resource_aws_waf_web_acl_test.go @@ -159,47 +159,40 @@ func testAccCheckAWSWafWebAclDisappears(v *waf.WebACL) resource.TestCheckFunc { return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).wafconn - // ChangeToken - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - req := &waf.UpdateWebACLInput{ - ChangeToken: resp.ChangeToken, - WebACLId: v.WebACLId, - } - - for _, ActivatedRule := range v.Rules { - WebACLUpdate := &waf.WebACLUpdate{ - Action: aws.String("DELETE"), - ActivatedRule: &waf.ActivatedRule{ - Priority: ActivatedRule.Priority, - RuleId: ActivatedRule.RuleId, - Action: ActivatedRule.Action, - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateWebACLInput{ + ChangeToken: token, + WebACLId: v.WebACLId, } - req.Updates = append(req.Updates, WebACLUpdate) - } - _, err = conn.UpdateWebACL(req) + for _, ActivatedRule := range v.Rules { + WebACLUpdate := &waf.WebACLUpdate{ + Action: aws.String("DELETE"), + ActivatedRule: &waf.ActivatedRule{ + Priority: ActivatedRule.Priority, + RuleId: ActivatedRule.RuleId, + Action: ActivatedRule.Action, + }, + } + req.Updates = append(req.Updates, WebACLUpdate) + } + + return conn.UpdateWebACL(req) + }) if err != nil { return fmt.Errorf("Error Updating WAF ACL: %s", err) } - resp, err = conn.GetChangeToken(ct) + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + opts := &waf.DeleteWebACLInput{ + ChangeToken: token, + WebACLId: v.WebACLId, + } + return conn.DeleteWebACL(opts) + }) if err != nil { - return fmt.Errorf("Error getting change token for waf ACL: %s", err) - } - - opts := &waf.DeleteWebACLInput{ - ChangeToken: resp.ChangeToken, - WebACLId: v.WebACLId, - } - if _, err := conn.DeleteWebACL(opts); err != nil { - return err + return fmt.Errorf("Error Deleting WAF ACL: %s", err) } return nil } diff --git a/builtin/providers/aws/resource_aws_waf_xss_match_set.go b/builtin/providers/aws/resource_aws_waf_xss_match_set.go index 574245f8b..222940dd0 100644 --- a/builtin/providers/aws/resource_aws_waf_xss_match_set.go +++ b/builtin/providers/aws/resource_aws_waf_xss_match_set.go @@ -61,24 +61,19 @@ func resourceAwsWafXssMatchSetCreate(d *schema.ResourceData, meta interface{}) e log.Printf("[INFO] Creating XssMatchSet: %s", d.Get("name").(string)) - // ChangeToken - var ct *waf.GetChangeTokenInput - - res, err := conn.GetChangeToken(ct) - if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) - } - - params := &waf.CreateXssMatchSetInput{ - ChangeToken: res.ChangeToken, - Name: aws.String(d.Get("name").(string)), - } - - resp, err := conn.CreateXssMatchSet(params) + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateXssMatchSetInput{ + ChangeToken: token, + Name: aws.String(d.Get("name").(string)), + } + return conn.CreateXssMatchSet(params) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error creating XssMatchSet: {{err}}", err) } + resp := out.(*waf.CreateXssMatchSetOutput) d.SetId(*resp.XssMatchSet.XssMatchSetId) @@ -126,17 +121,15 @@ func resourceAwsWafXssMatchSetDelete(d *schema.ResourceData, meta interface{}) e 
return errwrap.Wrapf("[ERROR] Error deleting XssMatchSet: {{err}}", err) } - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - - req := &waf.DeleteXssMatchSetInput{ - ChangeToken: resp.ChangeToken, - XssMatchSetId: aws.String(d.Id()), - } - - _, err = conn.DeleteXssMatchSet(req) + wr := newWafRetryer(conn, "global") + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteXssMatchSetInput{ + ChangeToken: token, + XssMatchSetId: aws.String(d.Id()), + } + return conn.DeleteXssMatchSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error deleting XssMatchSet: {{err}}", err) } @@ -147,32 +140,28 @@ func resourceAwsWafXssMatchSetDelete(d *schema.ResourceData, meta interface{}) e func updateXssMatchSetResource(d *schema.ResourceData, meta interface{}, ChangeAction string) error { conn := meta.(*AWSClient).wafconn - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) - } - - req := &waf.UpdateXssMatchSetInput{ - ChangeToken: resp.ChangeToken, - XssMatchSetId: aws.String(d.Id()), - } - - xssMatchTuples := d.Get("xss_match_tuples").(*schema.Set) - for _, xssMatchTuple := range xssMatchTuples.List() { - xmt := xssMatchTuple.(map[string]interface{}) - xssMatchTupleUpdate := &waf.XssMatchSetUpdate{ - Action: aws.String(ChangeAction), - XssMatchTuple: &waf.XssMatchTuple{ - FieldToMatch: expandFieldToMatch(xmt["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - TextTransformation: aws.String(xmt["text_transformation"].(string)), - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateXssMatchSetInput{ + ChangeToken: token, + XssMatchSetId: aws.String(d.Id()), } - req.Updates = append(req.Updates, xssMatchTupleUpdate) - } - _, err = conn.UpdateXssMatchSet(req) + xssMatchTuples := d.Get("xss_match_tuples").(*schema.Set) + for _, xssMatchTuple := range xssMatchTuples.List() { + xmt := xssMatchTuple.(map[string]interface{}) + xssMatchTupleUpdate := &waf.XssMatchSetUpdate{ + Action: aws.String(ChangeAction), + XssMatchTuple: &waf.XssMatchTuple{ + FieldToMatch: expandFieldToMatch(xmt["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + TextTransformation: aws.String(xmt["text_transformation"].(string)), + }, + } + req.Updates = append(req.Updates, xssMatchTupleUpdate) + } + + return conn.UpdateXssMatchSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error updating XssMatchSet: {{err}}", err) } diff --git a/builtin/providers/aws/resource_aws_waf_xss_match_set_test.go b/builtin/providers/aws/resource_aws_waf_xss_match_set_test.go index 5128fc813..b2d223086 100644 --- a/builtin/providers/aws/resource_aws_waf_xss_match_set_test.go +++ b/builtin/providers/aws/resource_aws_waf_xss_match_set_test.go @@ -96,44 +96,38 @@ func testAccCheckAWSWafXssMatchSetDisappears(v *waf.XssMatchSet) resource.TestCh return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).wafconn - var ct *waf.GetChangeTokenInput - - resp, err := conn.GetChangeToken(ct) - if err != nil { - return fmt.Errorf("Error getting change token: %s", err) - } - - req := &waf.UpdateXssMatchSetInput{ - ChangeToken: resp.ChangeToken, - XssMatchSetId: v.XssMatchSetId, - } - - for _, xssMatchTuple := range v.XssMatchTuples { - xssMatchTupleUpdate := &waf.XssMatchSetUpdate{ - Action: aws.String("DELETE"), - XssMatchTuple: 
&waf.XssMatchTuple{ - FieldToMatch: xssMatchTuple.FieldToMatch, - TextTransformation: xssMatchTuple.TextTransformation, - }, + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateXssMatchSetInput{ + ChangeToken: token, + XssMatchSetId: v.XssMatchSetId, } - req.Updates = append(req.Updates, xssMatchTupleUpdate) - } - _, err = conn.UpdateXssMatchSet(req) + + for _, xssMatchTuple := range v.XssMatchTuples { + xssMatchTupleUpdate := &waf.XssMatchSetUpdate{ + Action: aws.String("DELETE"), + XssMatchTuple: &waf.XssMatchTuple{ + FieldToMatch: xssMatchTuple.FieldToMatch, + TextTransformation: xssMatchTuple.TextTransformation, + }, + } + req.Updates = append(req.Updates, xssMatchTupleUpdate) + } + return conn.UpdateXssMatchSet(req) + }) if err != nil { return errwrap.Wrapf("[ERROR] Error updating XssMatchSet: {{err}}", err) } - resp, err = conn.GetChangeToken(ct) + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + opts := &waf.DeleteXssMatchSetInput{ + ChangeToken: token, + XssMatchSetId: v.XssMatchSetId, + } + return conn.DeleteXssMatchSet(opts) + }) if err != nil { - return errwrap.Wrapf("[ERROR] Error getting change token: {{err}}", err) - } - - opts := &waf.DeleteXssMatchSetInput{ - ChangeToken: resp.ChangeToken, - XssMatchSetId: v.XssMatchSetId, - } - if _, err := conn.DeleteXssMatchSet(opts); err != nil { - return err + return errwrap.Wrapf("[ERROR] Error deleting XssMatchSet: {{err}}", err) } return nil } diff --git a/builtin/providers/aws/waf_token_handlers.go b/builtin/providers/aws/waf_token_handlers.go new file mode 100644 index 000000000..ac99f0950 --- /dev/null +++ b/builtin/providers/aws/waf_token_handlers.go @@ -0,0 +1,49 @@ +package aws + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" +) + +type WafRetryer struct { + Connection *waf.WAF + Region string +} + +type withTokenFunc func(token *string) (interface{}, error) + +func (t *WafRetryer) RetryWithToken(f withTokenFunc) (interface{}, error) { + awsMutexKV.Lock(t.Region) + defer awsMutexKV.Unlock(t.Region) + + var out interface{} + err := resource.Retry(15*time.Minute, func() *resource.RetryError { + var err error + var tokenOut *waf.GetChangeTokenOutput + + tokenOut, err = t.Connection.GetChangeToken(&waf.GetChangeTokenInput{}) + if err != nil { + return resource.NonRetryableError(errwrap.Wrapf("Failed to acquire change token: {{err}}", err)) + } + + out, err = f(tokenOut.ChangeToken) + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok && awsErr.Code() == "WAFStaleDataException" { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + + return out, err +} + +func newWafRetryer(conn *waf.WAF, region string) *WafRetryer { + return &WafRetryer{Connection: conn, Region: region} +} From 80c359c3430f079679341191f353ce3ef3e4dce5 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Fri, 14 Apr 2017 21:12:45 +0100 Subject: [PATCH 135/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b5d43a9e..c6917327e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ BUG FIXES: * core: Add the close provider/provisioner transformers back [GH-13102] * provider/aws: Fix DB Parameter Group Name [GH-13279] + * provider/aws: Use mutex & retry for WAF change operations [GH-13656] * 
provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650]
 * provider/google: Stop setting the id when project creation fails [GH-13644]
 * provider/openstack: Fix updating Ports [GH-13604]

From 3d0073e05c89777ffbfb609615131e603e42427c Mon Sep 17 00:00:00 2001
From: Sander van Harmelen
Date: Fri, 14 Apr 2017 22:32:30 +0200
Subject: [PATCH 136/342] core: fix a crash by suggesting a different approach to solve #11170 (#13541)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Revert #11245, #11321, #11498 and #11757

These PRs are all related to issue #11170, for which I would like to
propose a different solution than the one currently implemented.

* A different approach to solve #11170

This approach has (IMHO) a few advantages with regard to the solution
currently implemented. I will elaborate on this in the PR.

---
 flatmap/expand.go            |  19 +-
 flatmap/expand_test.go       |  16 ++
 helper/schema/schema.go      |  34 ----
 helper/schema/schema_test.go | 357 -----------------------------------
 terraform/state.go           |  27 ---
 terraform/state_test.go      |  60 ------
 6 files changed, 30 insertions(+), 483 deletions(-)

diff --git a/flatmap/expand.go b/flatmap/expand.go
index e325077ef..6f2f6a228 100644
--- a/flatmap/expand.go
+++ b/flatmap/expand.go
@@ -37,7 +37,7 @@ func Expand(m map[string]string, key string) interface{} {
 	// Check if this is a prefix in the map
 	prefix := key + "."
-	for k, _ := range m {
+	for k := range m {
 		if strings.HasPrefix(k, prefix) {
 			return expandMap(m, prefix)
 		}
@@ -52,9 +52,17 @@ func expandArray(m map[string]string, prefix string) []interface{} {
 		panic(err)
 	}
 
-	// The Schema "Set" type stores its values in an array format, but using
-	// numeric hash values instead of ordinal keys. Take the set of keys
-	// regardless of value, and expand them in numeric order.
+	// If the number of elements in this array is 0, then return an
+	// empty slice as there is nothing to expand. Trying to expand it
+	// anyway could lead to crashes as any child maps, arrays or sets
+	// that no longer exist are still shown as empty with a count of 0.
+	if num == 0 {
+		return []interface{}{}
+	}
+
+	// The Schema "Set" type stores its values in an array format, but
+	// using numeric hash values instead of ordinal keys. Take the set
+	// of keys regardless of value, and expand them in numeric order.
 	// See GH-11042 for more details.
keySet := map[int]bool{} computed := map[string]bool{} @@ -107,7 +115,7 @@ func expandArray(m map[string]string, prefix string) []interface{} { func expandMap(m map[string]string, prefix string) map[string]interface{} { result := make(map[string]interface{}) - for k, _ := range m { + for k := range m { if !strings.HasPrefix(k, prefix) { continue } @@ -125,6 +133,7 @@ func expandMap(m map[string]string, prefix string) map[string]interface{} { if key == "%" { continue } + result[key] = Expand(m, k[:len(prefix)+len(key)]) } diff --git a/flatmap/expand_test.go b/flatmap/expand_test.go index cf74fadbc..61b151b17 100644 --- a/flatmap/expand_test.go +++ b/flatmap/expand_test.go @@ -147,6 +147,22 @@ func TestExpand(t *testing.T) { }, }, }, + + { + Map: map[string]string{ + "struct.#": "1", + "struct.0.name": "hello", + "struct.0.set.#": "0", + "struct.0.set.0.key": "value", + }, + Key: "struct", + Output: []interface{}{ + map[string]interface{}{ + "name": "hello", + "set": []interface{}{}, + }, + }, + }, } for _, tc := range cases { diff --git a/helper/schema/schema.go b/helper/schema/schema.go index 08c83263e..d04f05b35 100644 --- a/helper/schema/schema.go +++ b/helper/schema/schema.go @@ -656,19 +656,6 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { return nil } -func (m schemaMap) markAsRemoved(k string, schema *Schema, diff *terraform.InstanceDiff) { - existingDiff, ok := diff.Attributes[k] - if ok { - existingDiff.NewRemoved = true - diff.Attributes[k] = schema.finalizeDiff(existingDiff) - return - } - - diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - NewRemoved: true, - }) -} - func (m schemaMap) diff( k string, schema *Schema, @@ -792,7 +779,6 @@ func (m schemaMap) diffList( switch t := schema.Elem.(type) { case *Resource: - countDiff, cOk := diff.GetAttribute(k + ".#") // This is a complex resource for i := 0; i < maxLen; i++ { for k2, schema := range t.Schema { @@ -801,15 +787,6 @@ func (m schemaMap) diffList( if err != nil { return err } - - // If parent list is being removed - // remove all subfields which were missed by the diff func - // We process these separately because type-specific diff functions - // lack the context (hierarchy of fields) - subKeyIsCount := strings.HasSuffix(subK, ".#") - if cOk && countDiff.New == "0" && !subKeyIsCount { - m.markAsRemoved(subK, schema, diff) - } } } case *Schema: @@ -1019,7 +996,6 @@ func (m schemaMap) diffSet( for _, code := range list { switch t := schema.Elem.(type) { case *Resource: - countDiff, cOk := diff.GetAttribute(k + ".#") // This is a complex resource for k2, schema := range t.Schema { subK := fmt.Sprintf("%s.%s.%s", k, code, k2) @@ -1027,17 +1003,7 @@ func (m schemaMap) diffSet( if err != nil { return err } - - // If parent set is being removed - // remove all subfields which were missed by the diff func - // We process these separately because type-specific diff functions - // lack the context (hierarchy of fields) - subKeyIsCount := strings.HasSuffix(subK, ".#") - if cOk && countDiff.New == "0" && !subKeyIsCount { - m.markAsRemoved(subK, schema, diff) - } } - case *Schema: // Copy the schema so that we can set Computed/ForceNew from // the parent schema (the TypeSet). 
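
With `markAsRemoved` gone, the pruning of empty collections now happens inside `flatmap.Expand` via the `num == 0` guard added above. A minimal sketch of that behavior, mirroring the `struct`/`set` test case added in this patch (the `main` wrapper and printed value are illustrative, not part of the change):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/flatmap"
)

func main() {
	// A flatmapped list whose nested set reports a count of 0, yet still
	// carries a stale "struct.0.set.0.key" entry from an older state.
	attrs := map[string]string{
		"struct.#":           "1",
		"struct.0.name":      "hello",
		"struct.0.set.#":     "0",
		"struct.0.set.0.key": "value",
	}

	// With the zero-count guard in expandArray, the nested set expands to
	// an empty slice instead of resurrecting the stale element:
	// []interface {}{map[string]interface {}{"name":"hello", "set":[]interface {}{}}}
	fmt.Printf("%#v\n", flatmap.Expand(attrs, "struct"))
}
```
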
diff --git a/helper/schema/schema_test.go b/helper/schema/schema_test.go index 4d93ffd17..d2f667576 100644 --- a/helper/schema/schema_test.go +++ b/helper/schema/schema_test.go @@ -2777,363 +2777,6 @@ func TestSchemaMap_Diff(t *testing.T) { }, }, }, - - { - Name: "Removal of TypeList should cause nested Bool fields w/ Default to be removed too", - Schema: map[string]*Schema{ - "deployment_group_name": &Schema{ - Type: TypeString, - Required: true, - ForceNew: true, - }, - - "alarm_configuration": &Schema{ - Type: TypeList, - Optional: true, - MaxItems: 1, - Elem: &Resource{ - Schema: map[string]*Schema{ - "alarms": &Schema{ - Type: TypeSet, - Optional: true, - Set: HashString, - Elem: &Schema{Type: TypeString}, - }, - - "enabled": &Schema{ - Type: TypeBool, - Optional: true, - }, - - "ignore_poll_alarm_failure": &Schema{ - Type: TypeBool, - Optional: true, - Default: false, - }, - }, - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "alarm_configuration.#": "1", - "alarm_configuration.0.alarms.#": "1", - "alarm_configuration.0.alarms.2356372769": "foo", - "alarm_configuration.0.enabled": "true", - "alarm_configuration.0.ignore_poll_alarm_failure": "false", - "deployment_group_name": "foo-group-32345345345", - }, - }, - - Config: map[string]interface{}{ - "deployment_group_name": "foo-group-32345345345", - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "alarm_configuration.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - NewRemoved: false, - }, - "alarm_configuration.0.alarms": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - NewRemoved: true, - }, - "alarm_configuration.0.alarms.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - NewRemoved: false, - }, - "alarm_configuration.0.alarms.2356372769": &terraform.ResourceAttrDiff{ - Old: "foo", - New: "", - NewRemoved: true, - }, - "alarm_configuration.0.enabled": &terraform.ResourceAttrDiff{ - Old: "true", - New: "false", - NewRemoved: true, - }, - "alarm_configuration.0.ignore_poll_alarm_failure": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - NewRemoved: true, - }, - }, - }, - }, - - { - Name: "Removal of TypeList should cause all empty nested String fields to be removed too", - Schema: map[string]*Schema{ - "bucket": { - Type: TypeString, - Required: true, - ForceNew: true, - }, - - "acl": { - Type: TypeString, - Default: "private", - Optional: true, - }, - - "website": { - Type: TypeList, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "index_document": { - Type: TypeString, - Optional: true, - }, - - "error_document": { - Type: TypeString, - Optional: true, - }, - - "redirect_all_requests_to": { - Type: TypeString, - Optional: true, - }, - - "routing_rules": { - Type: TypeString, - Optional: true, - }, - }, - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "acl": "public-read", - "bucket": "tf-test-bucket-5011072831090096749", - "website.#": "1", - "website.0.error_document": "error.html", - "website.0.index_document": "index.html", - "website.0.redirect_all_requests_to": "", - }, - }, - - Config: map[string]interface{}{ - "acl": "public-read", - "bucket": "tf-test-bucket-5011072831090096749", - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "website.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - NewRemoved: false, - }, - "website.0.index_document": &terraform.ResourceAttrDiff{ - Old: "index.html", - New: "", - 
NewRemoved: true, - }, - "website.0.error_document": &terraform.ResourceAttrDiff{ - Old: "error.html", - New: "", - NewRemoved: true, - }, - "website.0.redirect_all_requests_to": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - NewRemoved: true, - }, - "website.0.routing_rules": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - NewRemoved: true, - }, - }, - }, - }, - - { - Name: "Removal of TypeList should cause nested Int fields w/ Default to be removed too", - Schema: map[string]*Schema{ - "availability_zones": &Schema{ - Type: TypeSet, - Elem: &Schema{Type: TypeString}, - Optional: true, - Computed: true, - Set: HashString, - }, - - "access_logs": &Schema{ - Type: TypeList, - Optional: true, - MaxItems: 1, - Elem: &Resource{ - Schema: map[string]*Schema{ - "interval": &Schema{ - Type: TypeInt, - Optional: true, - Default: 60, - }, - "bucket": &Schema{ - Type: TypeString, - Required: true, - }, - "bucket_prefix": &Schema{ - Type: TypeString, - Optional: true, - }, - "enabled": &Schema{ - Type: TypeBool, - Optional: true, - Default: true, - }, - }, - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "access_logs.#": "1", - "access_logs.0.bucket": "terraform-access-logs-bucket-5906065226840117876", - "access_logs.0.bucket_prefix": "", - "access_logs.0.enabled": "true", - "access_logs.0.interval": "5", - "availability_zones.#": "3", - "availability_zones.2050015877": "us-west-2c", - "availability_zones.221770259": "us-west-2b", - "availability_zones.2487133097": "us-west-2a", - }, - }, - - Config: map[string]interface{}{ - "availability_zones": []interface{}{"us-west-2a", "us-west-2b", "us-west-2c"}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "access_logs.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - NewRemoved: false, - }, - "access_logs.0.bucket": &terraform.ResourceAttrDiff{ - Old: "terraform-access-logs-bucket-5906065226840117876", - New: "", - NewRemoved: true, - }, - "access_logs.0.bucket_prefix": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - NewRemoved: true, - }, - "access_logs.0.enabled": &terraform.ResourceAttrDiff{ - Old: "", - New: "", - NewRemoved: true, - }, - "access_logs.0.interval": &terraform.ResourceAttrDiff{ - Old: "5", - New: "60", - NewRemoved: true, - }, - }, - }, - }, - - { - Name: "Removal of TypeSet should cause computed fields to be removed", - Schema: map[string]*Schema{ - "type_set": &Schema{ - Type: TypeSet, - Optional: true, - Elem: &Resource{ - Schema: map[string]*Schema{ - "name": &Schema{ - Type: TypeString, - Optional: true, - }, - "required": &Schema{ - Type: TypeString, - Required: true, - }, - "value": &Schema{ - Type: TypeInt, - Optional: true, - }, - "required_value": &Schema{ - Type: TypeInt, - Required: true, - }, - "computed_value": &Schema{ - Type: TypeString, - Optional: true, - Computed: true, - }, - }, - }, - Set: func(i interface{}) int { - if i != nil { - return 12345 - } - return 0 - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "type_set.#": "1", - "type_set.12345.name": "Name", - "type_set.12345.required": "Required", - "type_set.12345.value": "0", - "type_set.12345.required_value": "5", - "type_set.12345.computed_value": "COMPUTED", - }, - }, - - Config: map[string]interface{}{ - "type_set": []interface{}{}, - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "type_set.#": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - NewRemoved: 
false, - }, - "type_set.12345.name": &terraform.ResourceAttrDiff{ - Old: "Name", - New: "", - NewRemoved: true, - }, - "type_set.12345.required": &terraform.ResourceAttrDiff{ - Old: "Required", - New: "", - NewRemoved: true, - }, - "type_set.12345.value": &terraform.ResourceAttrDiff{ - Old: "0", - New: "0", - NewRemoved: true, - }, - "type_set.12345.required_value": &terraform.ResourceAttrDiff{ - Old: "5", - New: "0", - NewRemoved: true, - }, - "type_set.12345.computed_value": &terraform.ResourceAttrDiff{ - NewRemoved: true, - }, - }, - }, - }, } for i, tc := range cases { diff --git a/terraform/state.go b/terraform/state.go index 84d4c2669..074b68245 100644 --- a/terraform/state.go +++ b/terraform/state.go @@ -10,7 +10,6 @@ import ( "io/ioutil" "log" "reflect" - "regexp" "sort" "strconv" "strings" @@ -1713,32 +1712,6 @@ func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { } } - // Remove any now empty array, maps or sets because a parent structure - // won't include these entries in the count value. - isCount := regexp.MustCompile(`\.[%#]$`).MatchString - var deleted []string - - for k, v := range result.Attributes { - if isCount(k) && v == "0" { - delete(result.Attributes, k) - deleted = append(deleted, k) - } - } - - for _, k := range deleted { - // Sanity check for invalid structures. - // If we removed the primary count key, there should have been no - // other keys left with this prefix. - - // this must have a "#" or "%" which we need to remove - base := k[:len(k)-1] - for k, _ := range result.Attributes { - if strings.HasPrefix(k, base) { - panic(fmt.Sprintf("empty structure %q has entry %q", base, k)) - } - } - } - return result } diff --git a/terraform/state_test.go b/terraform/state_test.go index 324ab7970..5578f89c9 100644 --- a/terraform/state_test.go +++ b/terraform/state_test.go @@ -1450,66 +1450,6 @@ func TestInstanceState_MergeDiff(t *testing.T) { } } -// Make sure we don't leave empty maps or arrays in the flatmapped Attributes, -// since those may affect the counts of a parent structure. -func TestInstanceState_MergeDiffRemoveCounts(t *testing.T) { - is := InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "all.#": "3", - "all.1111": "x", - "all.1234.#": "1", - "all.1234.0": "a", - "all.5678.%": "1", - "all.5678.key": "val", - - // nested empty lists need to be removed cleanly - "all.nested.#": "0", - "all.nested.0.empty.#": "0", - "all.nested.1.empty.#": "0", - - // the value has a prefix that matches another key - // and ntohing should happen to this. - "all.nested_value": "y", - }, - } - - diff := &InstanceDiff{ - Attributes: map[string]*ResourceAttrDiff{ - "all.#": &ResourceAttrDiff{ - Old: "3", - New: "1", - }, - "all.1234.0": &ResourceAttrDiff{ - NewRemoved: true, - }, - "all.1234.#": &ResourceAttrDiff{ - Old: "1", - New: "0", - }, - "all.5678.key": &ResourceAttrDiff{ - NewRemoved: true, - }, - "all.5678.%": &ResourceAttrDiff{ - Old: "1", - New: "0", - }, - }, - } - - is2 := is.MergeDiff(diff) - - expected := map[string]string{ - "all.#": "1", - "all.1111": "x", - "all.nested_value": "y", - } - - if !reflect.DeepEqual(expected, is2.Attributes) { - t.Fatalf("bad: %#v", is2.Attributes) - } -} - // GH-12183. This tests that a list with a computed set generates the // right partial state. This never failed but is put here for completion // of the test case for GH-12183. 
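
The revert above handles lists and sets; the follow-up patch below adds the matching guard for maps, whose `%` count can also read 0 while stale child keys linger in the flatmap. A small sketch of that failure mode, based on the test case the patch adds (illustrative `main` wrapper and values):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/flatmap"
)

func main() {
	// The "%" count marks the map as empty, but leftover child keys are
	// still present; without the expandMap guard added below they would
	// re-expand into a phantom "set1" element.
	attrs := map[string]string{
		"empty_map_of_sets.%":         "0",
		"empty_map_of_sets.set1.#":    "0",
		"empty_map_of_sets.set1.1234": "x",
	}

	// With the guard: map[string]interface {}{}
	fmt.Printf("%#v\n", flatmap.Expand(attrs, "empty_map_of_sets"))
}
```
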
From f7adde0c448749bccd7004d73ad0d9ebca347394 Mon Sep 17 00:00:00 2001
From: James Bardin
Date: Thu, 13 Apr 2017 11:11:41 -0400
Subject: [PATCH 137/342] remove maps with empty counts during expand

When we encounter maps with empty counts, remove them from the expansion
to prevent already empty sub-elements from being retained.

Convert TestExpand to subtests for easier debugging.

---
 flatmap/expand.go      |  6 ++++++
 flatmap/expand_test.go | 30 +++++++++++++++++++++---------
 2 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/flatmap/expand.go b/flatmap/expand.go
index 6f2f6a228..2bfb3fe3d 100644
--- a/flatmap/expand.go
+++ b/flatmap/expand.go
@@ -114,6 +114,12 @@ func expandArray(m map[string]string, prefix string) []interface{} {
 }
 
 func expandMap(m map[string]string, prefix string) map[string]interface{} {
+	// Submaps may not have a '%' key, so we can't count on this value
+	// being here. If we don't have a count, just proceed as if we have
+	// a map.
+	if count, ok := m[prefix+"%"]; ok && count == "0" {
+		return map[string]interface{}{}
+	}
+
 	result := make(map[string]interface{})
 	for k := range m {
 		if !strings.HasPrefix(k, prefix) {
diff --git a/flatmap/expand_test.go b/flatmap/expand_test.go
index 61b151b17..c0fa83211 100644
--- a/flatmap/expand_test.go
+++ b/flatmap/expand_test.go
@@ -163,17 +163,29 @@ func TestExpand(t *testing.T) {
 			},
 		},
 	},
+
+		{
+			Map: map[string]string{
+				"empty_map_of_sets.%":         "0",
+				"empty_map_of_sets.set1.#":    "0",
+				"empty_map_of_sets.set1.1234": "x",
+			},
+			Key:    "empty_map_of_sets",
+			Output: map[string]interface{}{},
+		},
 	}
 
 	for _, tc := range cases {
-		actual := Expand(tc.Map, tc.Key)
-		if !reflect.DeepEqual(actual, tc.Output) {
-			t.Errorf(
-				"Key: %v\nMap:\n\n%#v\n\nOutput:\n\n%#v\n\nExpected:\n\n%#v\n",
-				tc.Key,
-				tc.Map,
-				actual,
-				tc.Output)
-		}
+		t.Run(tc.Key, func(t *testing.T) {
+			actual := Expand(tc.Map, tc.Key)
+			if !reflect.DeepEqual(actual, tc.Output) {
+				t.Errorf(
+					"Key: %v\nMap:\n\n%#v\n\nOutput:\n\n%#v\n\nExpected:\n\n%#v\n",
+					tc.Key,
+					tc.Map,
+					actual,
+					tc.Output)
+			}
+		})
 	}
 }

From 3b1738b2a32ee07488689be9c0c0507cc881cc2a Mon Sep 17 00:00:00 2001
From: Sander van Harmelen
Date: Fri, 14 Apr 2017 22:39:52 +0200
Subject: [PATCH 138/342] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c6917327e..e16f882df 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@ IMPROVEMENTS:
 BUG FIXES:
 
 * core: Add the close provider/provisioner transformers back [GH-13102]
+ * core: Fix a crash condition by improving the flatmap.Expand() logic [GH-13541]
 * provider/aws: Fix DB Parameter Group Name [GH-13279]
 * provider/aws: Use mutex & retry for WAF change operations [GH-13656]
 * provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650]

From 10c6c873ed07ac1bd75215b58be67a12c9648252 Mon Sep 17 00:00:00 2001
From: Sander van Harmelen
Date: Fri, 14 Apr 2017 23:41:59 +0200
Subject: [PATCH 139/342] provider/aws: add an option to skip getting the EC2 platforms (#13672)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add an option to skip getting the EC2 platforms

Even though this call fails silently in case of an error (usually a lack
of rights), it’s still a pretty expensive call. In our region (eu-west-1)
this can take up to 3 seconds. And since we have a system that involves
doing a lot of planning with the option `-refresh=false`, these
additional 3 seconds are really annoying and simply not needed.
So being able to choose to skip them would make our lives a little better :wink: * Update the docs accordingly --- builtin/providers/aws/config.go | 17 ++++++++++------- builtin/providers/aws/provider.go | 11 +++++++++++ .../docs/providers/aws/index.html.markdown | 4 ++++ 3 files changed, 25 insertions(+), 7 deletions(-) diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index a65bf93e5..17105d259 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -97,6 +97,7 @@ type Config struct { Insecure bool SkipCredsValidation bool + SkipGetEC2Platforms bool SkipRegionValidation bool SkipRequestingAccountId bool SkipMetadataApiCheck bool @@ -280,13 +281,15 @@ func (c *Config) Client() (interface{}, error) { client.ec2conn = ec2.New(awsEc2Sess) - supportedPlatforms, err := GetSupportedEC2Platforms(client.ec2conn) - if err != nil { - // We intentionally fail *silently* because there's a chance - // user just doesn't have ec2:DescribeAccountAttributes permissions - log.Printf("[WARN] Unable to get supported EC2 platforms: %s", err) - } else { - client.supportedplatforms = supportedPlatforms + if !c.SkipGetEC2Platforms { + supportedPlatforms, err := GetSupportedEC2Platforms(client.ec2conn) + if err != nil { + // We intentionally fail *silently* because there's a chance + // user just doesn't have ec2:DescribeAccountAttributes permissions + log.Printf("[WARN] Unable to get supported EC2 platforms: %s", err) + } else { + client.supportedplatforms = supportedPlatforms + } } client.acmconn = acm.New(sess) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index d086dedb3..fd761126c 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -122,6 +122,13 @@ func Provider() terraform.ResourceProvider { Description: descriptions["skip_credentials_validation"], }, + "skip_get_ec2_platforms": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_get_ec2_platforms"], + }, + "skip_region_validation": { Type: schema.TypeBool, Optional: true, @@ -489,6 +496,9 @@ func init() { "skip_credentials_validation": "Skip the credentials validation via STS API. " + "Used for AWS API implementations that do not have STS available/implemented.", + "skip_get_ec2_platforms": "Skip getting the supported EC2 platforms. " + + "Used by users that don't have ec2:DescribeAccountAttributes permissions.", + "skip_region_validation": "Skip static validation of region name. " + "Used by users of alternative AWS-like APIs or users w/ access to regions that are not public (yet).", @@ -528,6 +538,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { MaxRetries: d.Get("max_retries").(int), Insecure: d.Get("insecure").(bool), SkipCredsValidation: d.Get("skip_credentials_validation").(bool), + SkipGetEC2Platforms: d.Get("skip_get_ec2_platforms").(bool), SkipRegionValidation: d.Get("skip_region_validation").(bool), SkipRequestingAccountId: d.Get("skip_requesting_account_id").(bool), SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool), diff --git a/website/source/docs/providers/aws/index.html.markdown b/website/source/docs/providers/aws/index.html.markdown index ca7952775..958888c92 100644 --- a/website/source/docs/providers/aws/index.html.markdown +++ b/website/source/docs/providers/aws/index.html.markdown @@ -180,6 +180,10 @@ The following arguments are supported in the `provider` block: validation via the STS API. 
Useful for AWS API implementations that do not have STS
  available or implemented.

+* `skip_get_ec2_platforms` - (Optional) Skip getting the supported EC2
+  platforms. Used by users that don't have ec2:DescribeAccountAttributes
+  permissions.
+
 * `skip_region_validation` - (Optional) Skip validation of provided region name.
   Useful for AWS-like implementations that use their own region names or
   to bypass the validation for regions that aren't publicly available yet.

From 56dae36f978c9664c22de57e129614eab10858a6 Mon Sep 17 00:00:00 2001
From: Sander van Harmelen
Date: Fri, 14 Apr 2017 23:44:02 +0200
Subject: [PATCH 140/342] Update CHANGELOG.md

---
 CHANGELOG.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e16f882df..3567d7750 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,7 +6,8 @@ FEATURES:
 * **New Data Source:** `google_compute_subnetwork` [GH-12442]
 
 IMPROVEMENTS:
- * google: `google_compute_address` and `google_compute_global_address` are now importable [GH-13270]
+ * provider/aws: Add an option to skip getting the supported EC2 platforms [GH-13672]
+ * provider/google: `google_compute_address` and `google_compute_global_address` are now importable [GH-13270]
 
 BUG FIXES:

From c87459a895e5785236bc5b85af877e98c5c43bf1 Mon Sep 17 00:00:00 2001
From: Jay Wang
Date: Fri, 14 Apr 2017 15:19:22 -0700
Subject: [PATCH 141/342] Updated subnet test to cover the route table scenario

---
 .../azurerm/resource_arm_subnet_test.go | 21 +++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/builtin/providers/azurerm/resource_arm_subnet_test.go b/builtin/providers/azurerm/resource_arm_subnet_test.go
index 5f1f2bcbe..264b0a540 100644
--- a/builtin/providers/azurerm/resource_arm_subnet_test.go
+++ b/builtin/providers/azurerm/resource_arm_subnet_test.go
@@ -13,7 +13,7 @@ import (
 
 func TestAccAzureRMSubnet_basic(t *testing.T) {
 	ri := acctest.RandInt()
-	config := fmt.Sprintf(testAccAzureRMSubnet_basic, ri, ri, ri)
+	config := fmt.Sprintf(testAccAzureRMSubnet_basic, ri, ri, ri, ri, ri)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
@@ -33,7 +33,7 @@ func TestAccAzureRMSubnet_basic(t *testing.T) {
 
 func TestAccAzureRMSubnet_disappears(t *testing.T) {
 	ri := acctest.RandInt()
-	config := fmt.Sprintf(testAccAzureRMSubnet_basic, ri, ri, ri)
+	config := fmt.Sprintf(testAccAzureRMSubnet_basic, ri, ri, ri, ri, ri)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
@@ -152,5 +152,22 @@ resource "azurerm_subnet" "test" {
   resource_group_name  = "${azurerm_resource_group.test.name}"
   virtual_network_name = "${azurerm_virtual_network.test.name}"
   address_prefix       = "10.0.2.0/24"
+  route_table_id       = "${azurerm_route_table.test.id}"
+}
+
+resource "azurerm_route_table" "test" {
+  name                = "acctestroutetable%d"
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  location            = "West US"
+}
+
+resource "azurerm_route" "test" {
+  name                = "acctestroute%d"
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  route_table_name    = "${azurerm_route_table.test.name}"
+
+  address_prefix         = "10.100.0.0/14"
+  next_hop_type          = "VirtualAppliance"
+  next_hop_in_ip_address = "10.10.1.1"
 }
 `

From 279b00dd12cafc4b54dc6612373312bc449678a9 Mon Sep 17 00:00:00 2001
From: Jay Wang
Date: Fri, 14 Apr 2017 16:50:46 -0700
Subject: [PATCH 142/342] Lock Route Table / Subnets

---
 .../providers/azurerm/resource_arm_subnet.go | 21 +++++++++++++++++++
 builtin/providers/azurerm/resourceid.go | 10 +++++++++
 2 files changed, 31
insertions(+) diff --git a/builtin/providers/azurerm/resource_arm_subnet.go b/builtin/providers/azurerm/resource_arm_subnet.go index c5329b9f8..769e7ebd8 100644 --- a/builtin/providers/azurerm/resource_arm_subnet.go +++ b/builtin/providers/azurerm/resource_arm_subnet.go @@ -96,6 +96,15 @@ func resourceArmSubnetCreate(d *schema.ResourceData, meta interface{}) error { properties.RouteTable = &network.RouteTable{ ID: &rtId, } + + routeTableName, err := parseRouteTableName(rtId) + + if err != nil { + return err + } + + armMutexKV.Lock(routeTableName) + defer armMutexKV.Unlock(routeTableName) } subnet := network.Subnet{ @@ -182,6 +191,18 @@ func resourceArmSubnetDelete(d *schema.ResourceData, meta interface{}) error { name := id.Path["subnets"] vnetName := id.Path["virtualNetworks"] + if v, ok := d.GetOk("route_table_id"); ok { + rtId := v.(string) + routeTableName, err := parseRouteTableName(rtId) + + if err != nil { + return err + } + + armMutexKV.Lock(routeTableName) + defer armMutexKV.Unlock(routeTableName) + } + armMutexKV.Lock(vnetName) defer armMutexKV.Unlock(vnetName) diff --git a/builtin/providers/azurerm/resourceid.go b/builtin/providers/azurerm/resourceid.go index b05f4d75f..af0f35091 100644 --- a/builtin/providers/azurerm/resourceid.go +++ b/builtin/providers/azurerm/resourceid.go @@ -95,3 +95,13 @@ func parseAzureResourceID(id string) (*ResourceID, error) { return idObj, nil } + +func parseRouteTableName(routeTableId string) (string, error) { + id, err := parseAzureResourceID(routeTableId) + + if err != nil { + return "", fmt.Errorf("[ERROR] Unable to parse Route Table ID '%s': %+v", routeTableId, err) + } + + return id.Path["routeTables"], nil +} From 95046828310e078840bff759df3793ca46b25ee9 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Sat, 15 Apr 2017 07:56:41 +0100 Subject: [PATCH 143/342] aws: Increase default number of retries to 25 (#13673) --- builtin/providers/aws/provider.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index fd761126c..b1f9c2bf4 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -70,7 +70,7 @@ func Provider() terraform.ResourceProvider { "max_retries": { Type: schema.TypeInt, Optional: true, - Default: 11, + Default: 25, Description: descriptions["max_retries"], }, From 1a016ac37f48631881c5e7b9e0dd68b830c291d9 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Sat, 15 Apr 2017 07:58:15 +0100 Subject: [PATCH 144/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3567d7750..cbca16b59 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ BUG FIXES: * core: Add the close provider/provisioner transformers back [GH-13102] * core: Fix a crash condition by improving the flatmap.Expand() logic [GH-13541] * provider/aws: Fix DB Parameter Group Name [GH-13279] + * provider/aws: Increase default number of retries from 11 to 25 [GH-13673] * provider/aws: Use mutex & retry for WAF change operations [GH-13656] * provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650] * provider/google: Stop setting the id when project creation fails [GH-13644] From c63ad9c0f857a8d82e6154aa8fc4fbfb87ceb5f4 Mon Sep 17 00:00:00 2001 From: Gavin Williams Date: Sat, 15 Apr 2017 15:11:28 +0100 Subject: [PATCH 145/342] state/remote/swift: Support Openstack request logging (#13583) * provider/openstack: Expose LogRoundTripper fields externally * state/remote/swift: 
Add support for debugging Openstack calls using the OS_DEBUG env
variable.

* provider/openstack: Update LogRoundTripper to log headers as well as
the body.

* Add a `RedactHeaders` function to redact sensitive HTTP headers.
Refactor `logRequest` and `logResponse` to use the `RedactHeaders`
func.

---
 builtin/providers/openstack/config.go |  4 ++--
 builtin/providers/openstack/types.go  | 33 +++++++++++++++------------
 builtin/providers/openstack/util.go   | 22 ++++++++++++++++++
 state/remote/swift.go                 | 14 +++++++++++-
 4 files changed, 56 insertions(+), 17 deletions(-)

diff --git a/builtin/providers/openstack/config.go b/builtin/providers/openstack/config.go
index 651b66783..0c6cb8995 100644
--- a/builtin/providers/openstack/config.go
+++ b/builtin/providers/openstack/config.go
@@ -113,8 +113,8 @@ func (c *Config) loadAndValidate() error {
 	transport := &http.Transport{Proxy: http.ProxyFromEnvironment, TLSClientConfig: config}
 	client.HTTPClient = http.Client{
 		Transport: &LogRoundTripper{
-			rt:      transport,
-			osDebug: osDebug,
+			Rt:      transport,
+			OsDebug: osDebug,
 		},
 	}
 
diff --git a/builtin/providers/openstack/types.go b/builtin/providers/openstack/types.go
index 9c6a4f67e..c6c6a268a 100644
--- a/builtin/providers/openstack/types.go
+++ b/builtin/providers/openstack/types.go
@@ -24,8 +24,8 @@ import (
 // LogRoundTripper satisfies the http.RoundTripper interface and is used to
 // customize the default http client RoundTripper to allow for logging.
 type LogRoundTripper struct {
-	rt      http.RoundTripper
-	osDebug bool
+	Rt      http.RoundTripper
+	OsDebug bool
 }
 
 // RoundTrip performs a round-trip HTTP request and logs relevant information about it.
@@ -37,36 +37,36 @@ func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, er
 	}()
 
 	// for future reference, this is how to access the Transport struct:
-	//tlsconfig := lrt.rt.(*http.Transport).TLSClientConfig
+	//tlsconfig := lrt.Rt.(*http.Transport).TLSClientConfig
 
 	var err error
 
-	if lrt.osDebug {
+	if lrt.OsDebug {
 		log.Printf("[DEBUG] OpenStack Request URL: %s %s", request.Method, request.URL)
 
 		if request.Body != nil {
-			request.Body, err = lrt.logRequestBody(request.Body, request.Header)
+			request.Body, err = lrt.logRequest(request.Body, request.Header)
 			if err != nil {
 				return nil, err
 			}
 		}
 	}
 
-	response, err := lrt.rt.RoundTrip(request)
+	response, err := lrt.Rt.RoundTrip(request)
 	if response == nil {
 		return nil, err
 	}
 
-	if lrt.osDebug {
-		response.Body, err = lrt.logResponseBody(response.Body, response.Header)
+	if lrt.OsDebug {
+		response.Body, err = lrt.logResponse(response.Body, response.Header)
 	}
 
 	return response, err
 }
 
-// logRequestBody will log the HTTP Request body.
+// logRequest will log the HTTP Request details.
 // If the body is JSON, it will attempt to be pretty-formatted.
-func (lrt *LogRoundTripper) logRequestBody(original io.ReadCloser, headers http.Header) (io.ReadCloser, error) {
+func (lrt *LogRoundTripper) logRequest(original io.ReadCloser, headers http.Header) (io.ReadCloser, error) {
 	defer original.Close()
 
 	var bs bytes.Buffer
@@ -75,20 +75,25 @@ func (lrt *LogRoundTripper) logRequestBody(original io.ReadCloser, headers http.
return nil, err } + log.Printf("[DEBUG] Openstack Request headers:\n%s", strings.Join(RedactHeaders(headers), "\n")) + + // Handle request contentType contentType := headers.Get("Content-Type") if strings.HasPrefix(contentType, "application/json") { debugInfo := lrt.formatJSON(bs.Bytes()) - log.Printf("[DEBUG] OpenStack Request Options: %s", debugInfo) + log.Printf("[DEBUG] OpenStack Request Body: %s", debugInfo) } else { - log.Printf("[DEBUG] OpenStack Request Options: %s", bs.String()) + log.Printf("[DEBUG] OpenStack Request Body: %s", bs.String()) } return ioutil.NopCloser(strings.NewReader(bs.String())), nil } -// logResponseBody will log the HTTP Response body. +// logResponse will log the HTTP Response details. // If the body is JSON, it will attempt to be pretty-formatted. -func (lrt *LogRoundTripper) logResponseBody(original io.ReadCloser, headers http.Header) (io.ReadCloser, error) { +func (lrt *LogRoundTripper) logResponse(original io.ReadCloser, headers http.Header) (io.ReadCloser, error) { + log.Printf("[DEBUG] Openstack Response headers:\n%s", strings.Join(RedactHeaders(headers), "\n")) + contentType := headers.Get("Content-Type") if strings.HasPrefix(contentType, "application/json") { var bs bytes.Buffer diff --git a/builtin/providers/openstack/util.go b/builtin/providers/openstack/util.go index 70fb8914f..eb5ccdf09 100644 --- a/builtin/providers/openstack/util.go +++ b/builtin/providers/openstack/util.go @@ -2,8 +2,10 @@ package openstack import ( "fmt" + "net/http" "os" + "github.com/Unknwon/com" "github.com/gophercloud/gophercloud" "github.com/hashicorp/terraform/helper/schema" ) @@ -58,3 +60,23 @@ func MapValueSpecs(d *schema.ResourceData) map[string]string { } return m } + +// List of headers that need to be redacted +var REDACT_HEADERS = []string{"x-auth-token", "x-auth-key", "x-service-token", + "x-storage-token", "x-account-meta-temp-url-key", "x-account-meta-temp-url-key-2", + "x-container-meta-temp-url-key", "x-container-meta-temp-url-key-2", "set-cookie", + "x-subject-token"} + +// RedactHeaders processes a headers object, returning a redacted list +func RedactHeaders(headers http.Header) (processedHeaders []string) { + for name, header := range headers { + for _, v := range header { + if com.IsSliceContainsStr(REDACT_HEADERS, name) { + processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, "***")) + } else { + processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, v)) + } + } + } + return +} diff --git a/state/remote/swift.go b/state/remote/swift.go index 6fe490d40..2f12aa4e7 100644 --- a/state/remote/swift.go +++ b/state/remote/swift.go @@ -18,6 +18,7 @@ import ( "github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" + tf_openstack "github.com/hashicorp/terraform/builtin/providers/openstack" ) const TFSTATE_NAME = "tfstate.tf" @@ -249,8 +250,19 @@ func (c *SwiftClient) validateConfig(conf map[string]string) (err error) { config.BuildNameToCertificate() } + // if OS_DEBUG is set, log the requests and responses + var osDebug bool + if os.Getenv("OS_DEBUG") != "" { + osDebug = true + } + transport := &http.Transport{Proxy: http.ProxyFromEnvironment, TLSClientConfig: config} - provider.HTTPClient.Transport = transport + provider.HTTPClient = http.Client{ + Transport: &tf_openstack.LogRoundTripper{ + Rt: transport, + OsDebug: osDebug, + }, + } err = openstack.Authenticate(provider, ao) if err != 
nil { From 0d775de78eef58c231d6e4e9f5240f076666faca Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sat, 15 Apr 2017 17:11:55 +0300 Subject: [PATCH 146/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cbca16b59..53c4c90a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ FEATURES: * **New Data Source:** `google_compute_subnetwork` [GH-12442] IMPROVEMENTS: + * state/remote/swift: Support Openstack request logging [GH-13583] * provider/aws: Add an option to skip getting the supported EC2 platforms [GH-13672] * provider/google: `google_compute_address` and `google_compute_global_address` are now importable [GH-13270] From 11093ae688de8cde1d164216002dbca64a0f8e2f Mon Sep 17 00:00:00 2001 From: ebilhoo Date: Sat, 15 Apr 2017 19:35:26 +0000 Subject: [PATCH 147/342] resourceUltradnsRdpoolRead set rdata --- builtin/providers/ultradns/resource_ultradns_rdpool.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/builtin/providers/ultradns/resource_ultradns_rdpool.go b/builtin/providers/ultradns/resource_ultradns_rdpool.go index e67b57219..a45ff6939 100644 --- a/builtin/providers/ultradns/resource_ultradns_rdpool.go +++ b/builtin/providers/ultradns/resource_ultradns_rdpool.go @@ -137,11 +137,7 @@ func resourceUltradnsRdpoolRead(d *schema.ResourceData, meta interface{}) error d.Set("description", p.Description) d.Set("order", p.Order) - // TODO: rigorously test this to see if we can remove the error handling - - //TODO - - //err = d.Set("rdata", makeSetFromStrings(r.RData)) + err = d.Set("rdata", makeSetFromStrings(r.RData)) //err = d.Set("rdata", makeSetFromRdataAlone(r.RData)) if err != nil { return fmt.Errorf("rdata set failed: %#v", err) From c87459a895e5785236bc5b85af877e98c5c43bf1 Mon Sep 17 00:00:00 2001 From: Phillip Shipley Date: Sat, 15 Apr 2017 20:15:14 -0400 Subject: [PATCH 148/342] website: updated link to RE2 regex syntax page Docs had old link to google code, which does not redirect to the new syntax page. --- website/source/docs/configuration/interpolation.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index 473cc7f2b..5958aef70 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -290,7 +290,7 @@ The supported built-in functions are: as a regular expression. If using a regular expression, `replace` can reference subcaptures in the regular expression by using `$n` where `n` is the index or name of the subcapture. If using a regular expression, - the syntax conforms to the [re2 regular expression syntax](https://code.google.com/p/re2/wiki/Syntax). + the syntax conforms to the [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). * `sha1(string)` - Returns a (conventional) hexadecimal representation of the SHA-1 hash of the given string. 
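
Since the `replace` documentation above defers to RE2 syntax, which is also what Go's standard `regexp` package implements, here is a brief Go sketch of the `$n` subcapture substitution it describes (a standalone illustration, not part of the patch):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// An RE2 pattern with two subcaptures, referenced as $1 and $2 in the
	// replacement string, analogous to Terraform's replace() when the
	// search argument is a regular expression.
	re := regexp.MustCompile(`(\d+)\.(\d+)`)
	fmt.Println(re.ReplaceAllString("version 1.2", "$2.$1")) // version 2.1
}
```
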
From 00dca72bde2a84b28efcdfc51e4311aec82325ef Mon Sep 17 00:00:00 2001 From: Yusuke Goto Date: Sun, 16 Apr 2017 19:31:39 +0900 Subject: [PATCH 149/342] add wafregional dependencies (#13687) --- .../aws/aws-sdk-go/service/wafregional/api.go | 5716 +++++++++++++++++ .../aws-sdk-go/service/wafregional/errors.go | 155 + .../aws-sdk-go/service/wafregional/service.go | 101 + vendor/vendor.json | 8 + 4 files changed, 5980 insertions(+) create mode 100644 vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go b/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go new file mode 100644 index 000000000..7cd68869a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go @@ -0,0 +1,5716 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package wafregional provides a client for AWS WAF Regional. +package wafregional + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/waf" +) + +const opAssociateWebACL = "AssociateWebACL" + +// AssociateWebACLRequest generates a "aws/request.Request" representing the +// client's request for the AssociateWebACL operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssociateWebACL for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssociateWebACL method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssociateWebACLRequest method. +// req, resp := client.AssociateWebACLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/AssociateWebACL +func (c *WAFRegional) AssociateWebACLRequest(input *AssociateWebACLInput) (req *request.Request, output *AssociateWebACLOutput) { + op := &request.Operation{ + Name: opAssociateWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateWebACLInput{} + } + + output = &AssociateWebACLOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateWebACL API operation for AWS WAF Regional. +// +// Associates a web ACL with a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation AssociateWebACL for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. 
+// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFUnavailableEntityException "WAFUnavailableEntityException" +// The operation failed because the entity referenced is temporarily unavailable. +// Retry your request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/AssociateWebACL +func (c *WAFRegional) AssociateWebACL(input *AssociateWebACLInput) (*AssociateWebACLOutput, error) { + req, out := c.AssociateWebACLRequest(input) + return out, req.Send() +} + +// AssociateWebACLWithContext is the same as AssociateWebACL with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateWebACL for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) AssociateWebACLWithContext(ctx aws.Context, input *AssociateWebACLInput, opts ...request.Option) (*AssociateWebACLOutput, error) { + req, out := c.AssociateWebACLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateByteMatchSet = "CreateByteMatchSet" + +// CreateByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateByteMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateByteMatchSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateByteMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateByteMatchSetRequest method. 
+// req, resp := client.CreateByteMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateByteMatchSet +func (c *WAFRegional) CreateByteMatchSetRequest(input *waf.CreateByteMatchSetInput) (req *request.Request, output *waf.CreateByteMatchSetOutput) { + op := &request.Operation{ + Name: opCreateByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.CreateByteMatchSetInput{} + } + + output = &waf.CreateByteMatchSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateByteMatchSet API operation for AWS WAF Regional. +// +// Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the part +// of a web request that you want AWS WAF to inspect, such as the values of +// the User-Agent header or the query string. For example, you can create a +// ByteMatchSet that matches any requests with User-Agent headers that contain +// the string BadBot. You can then configure AWS WAF to reject those requests. +// +// To create and configure a ByteMatchSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateByteMatchSet request. +// +// Submit a CreateByteMatchSet request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateByteMatchSet request. +// +// Submit an UpdateByteMatchSet request to specify the part of the request that +// you want AWS WAF to inspect (for example, the header or the URI) and the +// value that you want AWS WAF to watch for. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation CreateByteMatchSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" +// The name specified is invalid. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. 
+// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateByteMatchSet +func (c *WAFRegional) CreateByteMatchSet(input *waf.CreateByteMatchSetInput) (*waf.CreateByteMatchSetOutput, error) { + req, out := c.CreateByteMatchSetRequest(input) + return out, req.Send() +} + +// CreateByteMatchSetWithContext is the same as CreateByteMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateByteMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) CreateByteMatchSetWithContext(ctx aws.Context, input *waf.CreateByteMatchSetInput, opts ...request.Option) (*waf.CreateByteMatchSetOutput, error) { + req, out := c.CreateByteMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateIPSet = "CreateIPSet" + +// CreateIPSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateIPSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateIPSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateIPSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateIPSetRequest method. +// req, resp := client.CreateIPSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateIPSet +func (c *WAFRegional) CreateIPSetRequest(input *waf.CreateIPSetInput) (req *request.Request, output *waf.CreateIPSetOutput) { + op := &request.Operation{ + Name: opCreateIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.CreateIPSetInput{} + } + + output = &waf.CreateIPSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateIPSet API operation for AWS WAF Regional. 
+// +// Creates an IPSet, which you use to specify which web requests you want to +// allow or block based on the IP addresses that the requests originate from. +// For example, if you're receiving a lot of requests from one or more individual +// IP addresses or one or more ranges of IP addresses and you want to block +// the requests, you can create an IPSet that contains those IP addresses and +// then configure AWS WAF to block the requests. +// +// To create and configure an IPSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateIPSet request. +// +// Submit a CreateIPSet request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateIPSet request. +// +// Submit an UpdateIPSet request to specify the IP addresses that you want AWS +// WAF to watch for. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation CreateIPSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" +// The name specified is invalid. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. 
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateIPSet +func (c *WAFRegional) CreateIPSet(input *waf.CreateIPSetInput) (*waf.CreateIPSetOutput, error) { + req, out := c.CreateIPSetRequest(input) + return out, req.Send() +} + +// CreateIPSetWithContext is the same as CreateIPSet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateIPSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) CreateIPSetWithContext(ctx aws.Context, input *waf.CreateIPSetInput, opts ...request.Option) (*waf.CreateIPSetOutput, error) { + req, out := c.CreateIPSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateRule = "CreateRule" + +// CreateRuleRequest generates a "aws/request.Request" representing the +// client's request for the CreateRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateRule for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRuleRequest method. +// req, resp := client.CreateRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRule +func (c *WAFRegional) CreateRuleRequest(input *waf.CreateRuleInput) (req *request.Request, output *waf.CreateRuleOutput) { + op := &request.Operation{ + Name: opCreateRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.CreateRuleInput{} + } + + output = &waf.CreateRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateRule API operation for AWS WAF Regional. +// +// Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and +// other predicates that identify the requests that you want to block. If you +// add more than one predicate to a Rule, a request must match all of the specifications +// to be allowed or blocked. For example, suppose you add the following to a +// Rule: +// +// * An IPSet that matches the IP address 192.0.2.44/32 +// +// * A ByteMatchSet that matches BadBot in the User-Agent header +// +// You then add the Rule to a WebACL and specify that you want to blocks requests +// that satisfy the Rule. For a request to be blocked, it must come from the +// IP address 192.0.2.44 and the User-Agent header in the request must contain +// the value BadBot. +// +// To create and configure a Rule, perform the following steps: +// +// Create and update the predicates that you want to include in the Rule. For +// more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet. 
+// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateRule request. +// +// Submit a CreateRule request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateRule request. +// +// Submit an UpdateRule request to specify the predicates that you want to include +// in the Rule. +// +// Create and update a WebACL that contains the Rule. For more information, +// see CreateWebACL. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation CreateRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" +// The name specified is invalid. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRule +func (c *WAFRegional) CreateRule(input *waf.CreateRuleInput) (*waf.CreateRuleOutput, error) { + req, out := c.CreateRuleRequest(input) + return out, req.Send() +} + +// CreateRuleWithContext is the same as CreateRule with the addition of +// the ability to pass a context and additional request options. +// +// See CreateRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) CreateRuleWithContext(ctx aws.Context, input *waf.CreateRuleInput, opts ...request.Option) (*waf.CreateRuleOutput, error) { + req, out := c.CreateRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSizeConstraintSet = "CreateSizeConstraintSet" + +// CreateSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateSizeConstraintSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateSizeConstraintSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSizeConstraintSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSizeConstraintSetRequest method. +// req, resp := client.CreateSizeConstraintSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSizeConstraintSet +func (c *WAFRegional) CreateSizeConstraintSetRequest(input *waf.CreateSizeConstraintSetInput) (req *request.Request, output *waf.CreateSizeConstraintSetOutput) { + op := &request.Operation{ + Name: opCreateSizeConstraintSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.CreateSizeConstraintSetInput{} + } + + output = &waf.CreateSizeConstraintSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSizeConstraintSet API operation for AWS WAF Regional. +// +// Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify +// the part of a web request that you want AWS WAF to check for length, such +// as the length of the User-Agent header or the length of the query string. +// For example, you can create a SizeConstraintSet that matches any requests +// that have a query string that is longer than 100 bytes. You can then configure +// AWS WAF to reject those requests. +// +// To create and configure a SizeConstraintSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateSizeConstraintSet request. +// +// Submit a CreateSizeConstraintSet request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateSizeConstraintSet request. +// +// Submit an UpdateSizeConstraintSet request to specify the part of the request +// that you want AWS WAF to inspect (for example, the header or the URI) and +// the value that you want AWS WAF to watch for. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for AWS WAF Regional's +// API operation CreateSizeConstraintSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" +// The name specified is invalid. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSizeConstraintSet +func (c *WAFRegional) CreateSizeConstraintSet(input *waf.CreateSizeConstraintSetInput) (*waf.CreateSizeConstraintSetOutput, error) { + req, out := c.CreateSizeConstraintSetRequest(input) + return out, req.Send() +} + +// CreateSizeConstraintSetWithContext is the same as CreateSizeConstraintSet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSizeConstraintSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) CreateSizeConstraintSetWithContext(ctx aws.Context, input *waf.CreateSizeConstraintSetInput, opts ...request.Option) (*waf.CreateSizeConstraintSetOutput, error) { + req, out := c.CreateSizeConstraintSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSqlInjectionMatchSet = "CreateSqlInjectionMatchSet" + +// CreateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateSqlInjectionMatchSet operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateSqlInjectionMatchSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSqlInjectionMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSqlInjectionMatchSetRequest method. +// req, resp := client.CreateSqlInjectionMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSqlInjectionMatchSet +func (c *WAFRegional) CreateSqlInjectionMatchSetRequest(input *waf.CreateSqlInjectionMatchSetInput) (req *request.Request, output *waf.CreateSqlInjectionMatchSetOutput) { + op := &request.Operation{ + Name: opCreateSqlInjectionMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.CreateSqlInjectionMatchSetInput{} + } + + output = &waf.CreateSqlInjectionMatchSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSqlInjectionMatchSet API operation for AWS WAF Regional. +// +// Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests +// that contain snippets of SQL code in a specified part of web requests. AWS +// WAF searches for character sequences that are likely to be malicious strings. +// +// To create and configure a SqlInjectionMatchSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateSqlInjectionMatchSet request. +// +// Submit a CreateSqlInjectionMatchSet request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateSqlInjectionMatchSet request. +// +// Submit an UpdateSqlInjectionMatchSet request to specify the parts of web +// requests in which you want to allow, block, or count malicious SQL code. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation CreateSqlInjectionMatchSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" +// The name specified is invalid. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. 
+// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSqlInjectionMatchSet +func (c *WAFRegional) CreateSqlInjectionMatchSet(input *waf.CreateSqlInjectionMatchSetInput) (*waf.CreateSqlInjectionMatchSetOutput, error) { + req, out := c.CreateSqlInjectionMatchSetRequest(input) + return out, req.Send() +} + +// CreateSqlInjectionMatchSetWithContext is the same as CreateSqlInjectionMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSqlInjectionMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) CreateSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.CreateSqlInjectionMatchSetInput, opts ...request.Option) (*waf.CreateSqlInjectionMatchSetOutput, error) { + req, out := c.CreateSqlInjectionMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateWebACL = "CreateWebACL" + +// CreateWebACLRequest generates a "aws/request.Request" representing the +// client's request for the CreateWebACL operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateWebACL for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateWebACL method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the CreateWebACLRequest method. +// req, resp := client.CreateWebACLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateWebACL +func (c *WAFRegional) CreateWebACLRequest(input *waf.CreateWebACLInput) (req *request.Request, output *waf.CreateWebACLOutput) { + op := &request.Operation{ + Name: opCreateWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.CreateWebACLInput{} + } + + output = &waf.CreateWebACLOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateWebACL API operation for AWS WAF Regional. +// +// Creates a WebACL, which contains the Rules that identify the CloudFront web +// requests that you want to allow, block, or count. AWS WAF evaluates Rules +// in order based on the value of Priority for each Rule. +// +// You also specify a default action, either ALLOW or BLOCK. If a web request +// doesn't match any of the Rules in a WebACL, AWS WAF responds to the request +// with the default action. +// +// To create and configure a WebACL, perform the following steps: +// +// Create and update the ByteMatchSet objects and other predicates that you +// want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, +// CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// +// Create and update the Rules that you want to include in the WebACL. For more +// information, see CreateRule and UpdateRule. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateWebACL request. +// +// Submit a CreateWebACL request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateWebACL request. +// +// Submit an UpdateWebACL request to specify the Rules that you want to include +// in the WebACL, to specify the default action, and to associate the WebACL +// with a CloudFront distribution. +// +// For more information about how to use the AWS WAF API, see the AWS WAF Developer +// Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation CreateWebACL for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" +// The name specified is invalid. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. 
+// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateWebACL +func (c *WAFRegional) CreateWebACL(input *waf.CreateWebACLInput) (*waf.CreateWebACLOutput, error) { + req, out := c.CreateWebACLRequest(input) + return out, req.Send() +} + +// CreateWebACLWithContext is the same as CreateWebACL with the addition of +// the ability to pass a context and additional request options. +// +// See CreateWebACL for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) CreateWebACLWithContext(ctx aws.Context, input *waf.CreateWebACLInput, opts ...request.Option) (*waf.CreateWebACLOutput, error) { + req, out := c.CreateWebACLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateXssMatchSet = "CreateXssMatchSet" + +// CreateXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateXssMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateXssMatchSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateXssMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateXssMatchSetRequest method. 
+// req, resp := client.CreateXssMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateXssMatchSet +func (c *WAFRegional) CreateXssMatchSetRequest(input *waf.CreateXssMatchSetInput) (req *request.Request, output *waf.CreateXssMatchSetOutput) { + op := &request.Operation{ + Name: opCreateXssMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.CreateXssMatchSetInput{} + } + + output = &waf.CreateXssMatchSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateXssMatchSet API operation for AWS WAF Regional. +// +// Creates an XssMatchSet, which you use to allow, block, or count requests +// that contain cross-site scripting attacks in the specified part of web requests. +// AWS WAF searches for character sequences that are likely to be malicious +// strings. +// +// To create and configure an XssMatchSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateXssMatchSet request. +// +// Submit a CreateXssMatchSet request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateXssMatchSet request. +// +// Submit an UpdateXssMatchSet request to specify the parts of web requests +// in which you want to allow, block, or count cross-site scripting attacks. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation CreateXssMatchSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" +// The name specified is invalid. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. 
+// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateXssMatchSet +func (c *WAFRegional) CreateXssMatchSet(input *waf.CreateXssMatchSetInput) (*waf.CreateXssMatchSetOutput, error) { + req, out := c.CreateXssMatchSetRequest(input) + return out, req.Send() +} + +// CreateXssMatchSetWithContext is the same as CreateXssMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateXssMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) CreateXssMatchSetWithContext(ctx aws.Context, input *waf.CreateXssMatchSetInput, opts ...request.Option) (*waf.CreateXssMatchSetOutput, error) { + req, out := c.CreateXssMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteByteMatchSet = "DeleteByteMatchSet" + +// DeleteByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteByteMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteByteMatchSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteByteMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteByteMatchSetRequest method. +// req, resp := client.DeleteByteMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteByteMatchSet +func (c *WAFRegional) DeleteByteMatchSetRequest(input *waf.DeleteByteMatchSetInput) (req *request.Request, output *waf.DeleteByteMatchSetOutput) { + op := &request.Operation{ + Name: opDeleteByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.DeleteByteMatchSetInput{} + } + + output = &waf.DeleteByteMatchSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteByteMatchSet API operation for AWS WAF Regional. +// +// Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's +// still used in any Rules or if it still includes any ByteMatchTuple objects +// (any filters). 
+// +// If you just want to remove a ByteMatchSet from a Rule, use UpdateRule. +// +// To permanently delete a ByteMatchSet, perform the following steps: +// +// Update the ByteMatchSet to remove filters, if any. For more information, +// see UpdateByteMatchSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteByteMatchSet request. +// +// Submit a DeleteByteMatchSet request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation DeleteByteMatchSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteByteMatchSet +func (c *WAFRegional) DeleteByteMatchSet(input *waf.DeleteByteMatchSetInput) (*waf.DeleteByteMatchSetOutput, error) { + req, out := c.DeleteByteMatchSetRequest(input) + return out, req.Send() +} + +// DeleteByteMatchSetWithContext is the same as DeleteByteMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteByteMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) DeleteByteMatchSetWithContext(ctx aws.Context, input *waf.DeleteByteMatchSetInput, opts ...request.Option) (*waf.DeleteByteMatchSetOutput, error) { + req, out := c.DeleteByteMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteIPSet = "DeleteIPSet" + +// DeleteIPSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIPSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteIPSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIPSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteIPSetRequest method. +// req, resp := client.DeleteIPSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteIPSet +func (c *WAFRegional) DeleteIPSetRequest(input *waf.DeleteIPSetInput) (req *request.Request, output *waf.DeleteIPSetOutput) { + op := &request.Operation{ + Name: opDeleteIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.DeleteIPSetInput{} + } + + output = &waf.DeleteIPSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteIPSet API operation for AWS WAF Regional. +// +// Permanently deletes an IPSet. You can't delete an IPSet if it's still used +// in any Rules or if it still includes any IP addresses. +// +// If you just want to remove an IPSet from a Rule, use UpdateRule. +// +// To permanently delete an IPSet from AWS WAF, perform the following steps: +// +// Update the IPSet to remove IP address ranges, if any. For more information, +// see UpdateIPSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteIPSet request. +// +// Submit a DeleteIPSet request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation DeleteIPSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. 
+// +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteIPSet +func (c *WAFRegional) DeleteIPSet(input *waf.DeleteIPSetInput) (*waf.DeleteIPSetOutput, error) { + req, out := c.DeleteIPSetRequest(input) + return out, req.Send() +} + +// DeleteIPSetWithContext is the same as DeleteIPSet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteIPSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) DeleteIPSetWithContext(ctx aws.Context, input *waf.DeleteIPSetInput, opts ...request.Option) (*waf.DeleteIPSetOutput, error) { + req, out := c.DeleteIPSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteRule = "DeleteRule" + +// DeleteRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteRule for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRuleRequest method. +// req, resp := client.DeleteRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRule +func (c *WAFRegional) DeleteRuleRequest(input *waf.DeleteRuleInput) (req *request.Request, output *waf.DeleteRuleOutput) { + op := &request.Operation{ + Name: opDeleteRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.DeleteRuleInput{} + } + + output = &waf.DeleteRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteRule API operation for AWS WAF Regional. +// +// Permanently deletes a Rule. You can't delete a Rule if it's still used in +// any WebACL objects or if it still includes any predicates, such as ByteMatchSet +// objects. +// +// If you just want to remove a Rule from a WebACL, use UpdateWebACL. +// +// To permanently delete a Rule from AWS WAF, perform the following steps: +// +// Update the Rule to remove predicates, if any. For more information, see UpdateRule. 
+// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteRule request. +// +// Submit a DeleteRule request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation DeleteRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRule +func (c *WAFRegional) DeleteRule(input *waf.DeleteRuleInput) (*waf.DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) + return out, req.Send() +} + +// DeleteRuleWithContext is the same as DeleteRule with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) DeleteRuleWithContext(ctx aws.Context, input *waf.DeleteRuleInput, opts ...request.Option) (*waf.DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSizeConstraintSet = "DeleteSizeConstraintSet" + +// DeleteSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSizeConstraintSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteSizeConstraintSet for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSizeConstraintSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSizeConstraintSetRequest method. +// req, resp := client.DeleteSizeConstraintSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSizeConstraintSet +func (c *WAFRegional) DeleteSizeConstraintSetRequest(input *waf.DeleteSizeConstraintSetInput) (req *request.Request, output *waf.DeleteSizeConstraintSetOutput) { + op := &request.Operation{ + Name: opDeleteSizeConstraintSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.DeleteSizeConstraintSetInput{} + } + + output = &waf.DeleteSizeConstraintSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteSizeConstraintSet API operation for AWS WAF Regional. +// +// Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet +// if it's still used in any Rules or if it still includes any SizeConstraint +// objects (any filters). +// +// If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule. +// +// To permanently delete a SizeConstraintSet, perform the following steps: +// +// Update the SizeConstraintSet to remove filters, if any. For more information, +// see UpdateSizeConstraintSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteSizeConstraintSet request. +// +// Submit a DeleteSizeConstraintSet request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation DeleteSizeConstraintSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. 
+// +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSizeConstraintSet +func (c *WAFRegional) DeleteSizeConstraintSet(input *waf.DeleteSizeConstraintSetInput) (*waf.DeleteSizeConstraintSetOutput, error) { + req, out := c.DeleteSizeConstraintSetRequest(input) + return out, req.Send() +} + +// DeleteSizeConstraintSetWithContext is the same as DeleteSizeConstraintSet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSizeConstraintSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) DeleteSizeConstraintSetWithContext(ctx aws.Context, input *waf.DeleteSizeConstraintSetInput, opts ...request.Option) (*waf.DeleteSizeConstraintSetOutput, error) { + req, out := c.DeleteSizeConstraintSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSqlInjectionMatchSet = "DeleteSqlInjectionMatchSet" + +// DeleteSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSqlInjectionMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteSqlInjectionMatchSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSqlInjectionMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSqlInjectionMatchSetRequest method. +// req, resp := client.DeleteSqlInjectionMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSqlInjectionMatchSet +func (c *WAFRegional) DeleteSqlInjectionMatchSetRequest(input *waf.DeleteSqlInjectionMatchSetInput) (req *request.Request, output *waf.DeleteSqlInjectionMatchSetOutput) { + op := &request.Operation{ + Name: opDeleteSqlInjectionMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.DeleteSqlInjectionMatchSetInput{} + } + + output = &waf.DeleteSqlInjectionMatchSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteSqlInjectionMatchSet API operation for AWS WAF Regional. 
+// +// Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet +// if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple +// objects. +// +// If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule. +// +// To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following +// steps: +// +// Update the SqlInjectionMatchSet to remove filters, if any. For more information, +// see UpdateSqlInjectionMatchSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteSqlInjectionMatchSet request. +// +// Submit a DeleteSqlInjectionMatchSet request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation DeleteSqlInjectionMatchSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSqlInjectionMatchSet +func (c *WAFRegional) DeleteSqlInjectionMatchSet(input *waf.DeleteSqlInjectionMatchSetInput) (*waf.DeleteSqlInjectionMatchSetOutput, error) { + req, out := c.DeleteSqlInjectionMatchSetRequest(input) + return out, req.Send() +} + +// DeleteSqlInjectionMatchSetWithContext is the same as DeleteSqlInjectionMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSqlInjectionMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
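+//
+// // A minimal sketch of bounding the call with a timeout; client is an
+// // assumed, already-constructed *WAFRegional, and the token and ID values
+// // are hypothetical:
+// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+// defer cancel()
+// _, err := client.DeleteSqlInjectionMatchSetWithContext(ctx, &waf.DeleteSqlInjectionMatchSetInput{
+// ChangeToken: token, // obtained from a prior GetChangeToken call
+// SqlInjectionMatchSetId: aws.String("example-sql-injection-match-set-id"), // hypothetical
+// })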
+func (c *WAFRegional) DeleteSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.DeleteSqlInjectionMatchSetInput, opts ...request.Option) (*waf.DeleteSqlInjectionMatchSetOutput, error) { + req, out := c.DeleteSqlInjectionMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteWebACL = "DeleteWebACL" + +// DeleteWebACLRequest generates a "aws/request.Request" representing the +// client's request for the DeleteWebACL operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteWebACL for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteWebACL method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteWebACLRequest method. +// req, resp := client.DeleteWebACLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteWebACL +func (c *WAFRegional) DeleteWebACLRequest(input *waf.DeleteWebACLInput) (req *request.Request, output *waf.DeleteWebACLOutput) { + op := &request.Operation{ + Name: opDeleteWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.DeleteWebACLInput{} + } + + output = &waf.DeleteWebACLOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteWebACL API operation for AWS WAF Regional. +// +// Permanently deletes a WebACL. You can't delete a WebACL if it still contains +// any Rules. +// +// To delete a WebACL, perform the following steps: +// +// Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteWebACL request. +// +// Submit a DeleteWebACL request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation DeleteWebACL for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. 
For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteWebACL +func (c *WAFRegional) DeleteWebACL(input *waf.DeleteWebACLInput) (*waf.DeleteWebACLOutput, error) { + req, out := c.DeleteWebACLRequest(input) + return out, req.Send() +} + +// DeleteWebACLWithContext is the same as DeleteWebACL with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteWebACL for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) DeleteWebACLWithContext(ctx aws.Context, input *waf.DeleteWebACLInput, opts ...request.Option) (*waf.DeleteWebACLOutput, error) { + req, out := c.DeleteWebACLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteXssMatchSet = "DeleteXssMatchSet" + +// DeleteXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteXssMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteXssMatchSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteXssMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteXssMatchSetRequest method. +// req, resp := client.DeleteXssMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteXssMatchSet +func (c *WAFRegional) DeleteXssMatchSetRequest(input *waf.DeleteXssMatchSetInput) (req *request.Request, output *waf.DeleteXssMatchSetOutput) { + op := &request.Operation{ + Name: opDeleteXssMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.DeleteXssMatchSetInput{} + } + + output = &waf.DeleteXssMatchSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteXssMatchSet API operation for AWS WAF Regional. +// +// Permanently deletes an XssMatchSet. 
You can't delete an XssMatchSet if it's +// still used in any Rules or if it still contains any XssMatchTuple objects. +// +// If you just want to remove an XssMatchSet from a Rule, use UpdateRule. +// +// To permanently delete an XssMatchSet from AWS WAF, perform the following +// steps: +// +// Update the XssMatchSet to remove filters, if any. For more information, see +// UpdateXssMatchSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteXssMatchSet request. +// +// Submit a DeleteXssMatchSet request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation DeleteXssMatchSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteXssMatchSet +func (c *WAFRegional) DeleteXssMatchSet(input *waf.DeleteXssMatchSetInput) (*waf.DeleteXssMatchSetOutput, error) { + req, out := c.DeleteXssMatchSetRequest(input) + return out, req.Send() +} + +// DeleteXssMatchSetWithContext is the same as DeleteXssMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteXssMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
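+//
+// // A sketch of the token-then-delete steps described above, assuming the
+// // set's filters were already removed; client is an assumed *WAFRegional
+// // and the XssMatchSetId is hypothetical (error handling elided):
+// tok, _ := client.GetChangeToken(&waf.GetChangeTokenInput{})
+// _, err := client.DeleteXssMatchSet(&waf.DeleteXssMatchSetInput{
+// ChangeToken: tok.ChangeToken,
+// XssMatchSetId: aws.String("example-xss-match-set-id"), // hypothetical
+// })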
+func (c *WAFRegional) DeleteXssMatchSetWithContext(ctx aws.Context, input *waf.DeleteXssMatchSetInput, opts ...request.Option) (*waf.DeleteXssMatchSetOutput, error) { + req, out := c.DeleteXssMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateWebACL = "DisassociateWebACL" + +// DisassociateWebACLRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateWebACL operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DisassociateWebACL for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisassociateWebACL method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisassociateWebACLRequest method. +// req, resp := client.DisassociateWebACLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACL +func (c *WAFRegional) DisassociateWebACLRequest(input *DisassociateWebACLInput) (req *request.Request, output *DisassociateWebACLOutput) { + op := &request.Operation{ + Name: opDisassociateWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateWebACLInput{} + } + + output = &DisassociateWebACLOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateWebACL API operation for AWS WAF Regional. +// +// Removes a web ACL from the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation DisassociateWebACL for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. 
+// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACL +func (c *WAFRegional) DisassociateWebACL(input *DisassociateWebACLInput) (*DisassociateWebACLOutput, error) { + req, out := c.DisassociateWebACLRequest(input) + return out, req.Send() +} + +// DisassociateWebACLWithContext is the same as DisassociateWebACL with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateWebACL for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) DisassociateWebACLWithContext(ctx aws.Context, input *DisassociateWebACLInput, opts ...request.Option) (*DisassociateWebACLOutput, error) { + req, out := c.DisassociateWebACLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetByteMatchSet = "GetByteMatchSet" + +// GetByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetByteMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetByteMatchSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetByteMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetByteMatchSetRequest method. +// req, resp := client.GetByteMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetByteMatchSet +func (c *WAFRegional) GetByteMatchSetRequest(input *waf.GetByteMatchSetInput) (req *request.Request, output *waf.GetByteMatchSetOutput) { + op := &request.Operation{ + Name: opGetByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.GetByteMatchSetInput{} + } + + output = &waf.GetByteMatchSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetByteMatchSet API operation for AWS WAF Regional. +// +// Returns the ByteMatchSet specified by ByteMatchSetId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation GetByteMatchSet for usage and error information. 
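+//
+// // A short usage sketch; client is an assumed *WAFRegional and the
+// // ByteMatchSetId is hypothetical:
+// out, err := client.GetByteMatchSet(&waf.GetByteMatchSetInput{
+// ByteMatchSetId: aws.String("example-byte-match-set-id"), // hypothetical
+// })
+// if err == nil {
+// for _, t := range out.ByteMatchSet.ByteMatchTuples {
+// fmt.Println(aws.StringValue(t.PositionalConstraint), string(t.TargetString))
+// }
+// }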
+// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetByteMatchSet +func (c *WAFRegional) GetByteMatchSet(input *waf.GetByteMatchSetInput) (*waf.GetByteMatchSetOutput, error) { + req, out := c.GetByteMatchSetRequest(input) + return out, req.Send() +} + +// GetByteMatchSetWithContext is the same as GetByteMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See GetByteMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) GetByteMatchSetWithContext(ctx aws.Context, input *waf.GetByteMatchSetInput, opts ...request.Option) (*waf.GetByteMatchSetOutput, error) { + req, out := c.GetByteMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetChangeToken = "GetChangeToken" + +// GetChangeTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetChangeToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetChangeToken for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetChangeToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetChangeTokenRequest method. +// req, resp := client.GetChangeTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeToken +func (c *WAFRegional) GetChangeTokenRequest(input *waf.GetChangeTokenInput) (req *request.Request, output *waf.GetChangeTokenOutput) { + op := &request.Operation{ + Name: opGetChangeToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.GetChangeTokenInput{} + } + + output = &waf.GetChangeTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetChangeToken API operation for AWS WAF Regional. +// +// When you want to create, update, or delete AWS WAF objects, get a change +// token and include the change token in the create, update, or delete request. +// Change tokens ensure that your application doesn't submit conflicting requests +// to AWS WAF. 
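+//
+// // For example (client is an assumed, already-constructed *WAFRegional):
+// tok, err := client.GetChangeToken(&waf.GetChangeTokenInput{})
+// // ...then pass tok.ChangeToken in the create, update, or delete request.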
+// +// Each create, update, or delete request must use a unique change token. If +// your application submits a GetChangeToken request and then submits a second +// GetChangeToken request before submitting a create, update, or delete request, +// the second GetChangeToken request returns the same value as the first GetChangeToken +// request. +// +// When you use a change token in a create, update, or delete request, the status +// of the change token changes to PENDING, which indicates that AWS WAF is propagating +// the change to all AWS WAF servers. Use GetChangeTokenStatus to determine +// the status of your change token. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation GetChangeToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeToken +func (c *WAFRegional) GetChangeToken(input *waf.GetChangeTokenInput) (*waf.GetChangeTokenOutput, error) { + req, out := c.GetChangeTokenRequest(input) + return out, req.Send() +} + +// GetChangeTokenWithContext is the same as GetChangeToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetChangeToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) GetChangeTokenWithContext(ctx aws.Context, input *waf.GetChangeTokenInput, opts ...request.Option) (*waf.GetChangeTokenOutput, error) { + req, out := c.GetChangeTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetChangeTokenStatus = "GetChangeTokenStatus" + +// GetChangeTokenStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetChangeTokenStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetChangeTokenStatus for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetChangeTokenStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetChangeTokenStatusRequest method. 
+// req, resp := client.GetChangeTokenStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeTokenStatus +func (c *WAFRegional) GetChangeTokenStatusRequest(input *waf.GetChangeTokenStatusInput) (req *request.Request, output *waf.GetChangeTokenStatusOutput) { + op := &request.Operation{ + Name: opGetChangeTokenStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.GetChangeTokenStatusInput{} + } + + output = &waf.GetChangeTokenStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetChangeTokenStatus API operation for AWS WAF Regional. +// +// Returns the status of a ChangeToken that you got by calling GetChangeToken. +// ChangeTokenStatus is one of the following values: +// +// * PROVISIONED: You requested the change token by calling GetChangeToken, +// but you haven't used it yet in a call to create, update, or delete an +// AWS WAF object. +// +// * PENDING: AWS WAF is propagating the create, update, or delete request +// to all AWS WAF servers. +// +// * IN_SYNC: Propagation is complete. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation GetChangeTokenStatus for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeTokenStatus +func (c *WAFRegional) GetChangeTokenStatus(input *waf.GetChangeTokenStatusInput) (*waf.GetChangeTokenStatusOutput, error) { + req, out := c.GetChangeTokenStatusRequest(input) + return out, req.Send() +} + +// GetChangeTokenStatusWithContext is the same as GetChangeTokenStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetChangeTokenStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) GetChangeTokenStatusWithContext(ctx aws.Context, input *waf.GetChangeTokenStatusInput, opts ...request.Option) (*waf.GetChangeTokenStatusOutput, error) { + req, out := c.GetChangeTokenStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetIPSet = "GetIPSet" + +// GetIPSetRequest generates a "aws/request.Request" representing the +// client's request for the GetIPSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetIPSet for usage and error information. 
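+//
+// // A sketch of the request form; client is an assumed *WAFRegional and the
+// // IPSetId is hypothetical:
+// req, resp := client.GetIPSetRequest(&waf.GetIPSetInput{
+// IPSetId: aws.String("example-ip-set-id"), // hypothetical
+// })
+// if err := req.Send(); err == nil {
+// for _, d := range resp.IPSet.IPSetDescriptors {
+// fmt.Println(aws.StringValue(d.Type), aws.StringValue(d.Value))
+// }
+// }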
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIPSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIPSetRequest method. +// req, resp := client.GetIPSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetIPSet +func (c *WAFRegional) GetIPSetRequest(input *waf.GetIPSetInput) (req *request.Request, output *waf.GetIPSetOutput) { + op := &request.Operation{ + Name: opGetIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.GetIPSetInput{} + } + + output = &waf.GetIPSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetIPSet API operation for AWS WAF Regional. +// +// Returns the IPSet that is specified by IPSetId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation GetIPSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetIPSet +func (c *WAFRegional) GetIPSet(input *waf.GetIPSetInput) (*waf.GetIPSetOutput, error) { + req, out := c.GetIPSetRequest(input) + return out, req.Send() +} + +// GetIPSetWithContext is the same as GetIPSet with the addition of +// the ability to pass a context and additional request options. +// +// See GetIPSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) GetIPSetWithContext(ctx aws.Context, input *waf.GetIPSetInput, opts ...request.Option) (*waf.GetIPSetOutput, error) { + req, out := c.GetIPSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetRule = "GetRule" + +// GetRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetRule for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRuleRequest method. +// req, resp := client.GetRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRule +func (c *WAFRegional) GetRuleRequest(input *waf.GetRuleInput) (req *request.Request, output *waf.GetRuleOutput) { + op := &request.Operation{ + Name: opGetRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.GetRuleInput{} + } + + output = &waf.GetRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetRule API operation for AWS WAF Regional. +// +// Returns the Rule that is specified by the RuleId that you included in the +// GetRule request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation GetRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRule +func (c *WAFRegional) GetRule(input *waf.GetRuleInput) (*waf.GetRuleOutput, error) { + req, out := c.GetRuleRequest(input) + return out, req.Send() +} + +// GetRuleWithContext is the same as GetRule with the addition of +// the ability to pass a context and additional request options. +// +// See GetRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) GetRuleWithContext(ctx aws.Context, input *waf.GetRuleInput, opts ...request.Option) (*waf.GetRuleOutput, error) { + req, out := c.GetRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSampledRequests = "GetSampledRequests" + +// GetSampledRequestsRequest generates a "aws/request.Request" representing the +// client's request for the GetSampledRequests operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetSampledRequests for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSampledRequests method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSampledRequestsRequest method. +// req, resp := client.GetSampledRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSampledRequests +func (c *WAFRegional) GetSampledRequestsRequest(input *waf.GetSampledRequestsInput) (req *request.Request, output *waf.GetSampledRequestsOutput) { + op := &request.Operation{ + Name: opGetSampledRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.GetSampledRequestsInput{} + } + + output = &waf.GetSampledRequestsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSampledRequests API operation for AWS WAF Regional. +// +// Gets detailed information about a specified number of requests--a sample--that +// AWS WAF randomly selects from among the first 5,000 requests that your AWS +// resource received during a time range that you choose. You can specify a +// sample size of up to 500 requests, and you can specify any time range in +// the previous three hours. +// +// GetSampledRequests returns a time range, which is usually the time range +// that you specified. However, if your resource (such as a CloudFront distribution) +// received 5,000 requests before the specified time range elapsed, GetSampledRequests +// returns an updated time range. This new time range indicates the actual period +// during which AWS WAF selected the requests in the sample. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation GetSampledRequests for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSampledRequests +func (c *WAFRegional) GetSampledRequests(input *waf.GetSampledRequestsInput) (*waf.GetSampledRequestsOutput, error) { + req, out := c.GetSampledRequestsRequest(input) + return out, req.Send() +} + +// GetSampledRequestsWithContext is the same as GetSampledRequests with the addition of +// the ability to pass a context and additional request options. +// +// See GetSampledRequests for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
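+//
+// // A sketch of a one-hour sampling window (within the service's three-hour
+// // limit); client is an assumed *WAFRegional and the IDs are hypothetical:
+// end := time.Now()
+// out, err := client.GetSampledRequests(&waf.GetSampledRequestsInput{
+// WebAclId: aws.String("example-web-acl-id"), // hypothetical
+// RuleId: aws.String("example-rule-id"), // hypothetical
+// MaxItems: aws.Int64(100), // sample size, at most 500
+// TimeWindow: &waf.TimeWindow{
+// StartTime: aws.Time(end.Add(-1 * time.Hour)),
+// EndTime: aws.Time(end),
+// },
+// })
+// // out.SampledRequests holds the sample; out.TimeWindow reports the actual
+// // range AWS WAF used.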
+func (c *WAFRegional) GetSampledRequestsWithContext(ctx aws.Context, input *waf.GetSampledRequestsInput, opts ...request.Option) (*waf.GetSampledRequestsOutput, error) { + req, out := c.GetSampledRequestsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSizeConstraintSet = "GetSizeConstraintSet" + +// GetSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the GetSizeConstraintSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetSizeConstraintSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSizeConstraintSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSizeConstraintSetRequest method. +// req, resp := client.GetSizeConstraintSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSizeConstraintSet +func (c *WAFRegional) GetSizeConstraintSetRequest(input *waf.GetSizeConstraintSetInput) (req *request.Request, output *waf.GetSizeConstraintSetOutput) { + op := &request.Operation{ + Name: opGetSizeConstraintSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.GetSizeConstraintSetInput{} + } + + output = &waf.GetSizeConstraintSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSizeConstraintSet API operation for AWS WAF Regional. +// +// Returns the SizeConstraintSet specified by SizeConstraintSetId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation GetSizeConstraintSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSizeConstraintSet +func (c *WAFRegional) GetSizeConstraintSet(input *waf.GetSizeConstraintSetInput) (*waf.GetSizeConstraintSetOutput, error) { + req, out := c.GetSizeConstraintSetRequest(input) + return out, req.Send() +} + +// GetSizeConstraintSetWithContext is the same as GetSizeConstraintSet with the addition of +// the ability to pass a context and additional request options. +// +// See GetSizeConstraintSet for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) GetSizeConstraintSetWithContext(ctx aws.Context, input *waf.GetSizeConstraintSetInput, opts ...request.Option) (*waf.GetSizeConstraintSetOutput, error) { + req, out := c.GetSizeConstraintSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSqlInjectionMatchSet = "GetSqlInjectionMatchSet" + +// GetSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetSqlInjectionMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetSqlInjectionMatchSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSqlInjectionMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSqlInjectionMatchSetRequest method. +// req, resp := client.GetSqlInjectionMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSqlInjectionMatchSet +func (c *WAFRegional) GetSqlInjectionMatchSetRequest(input *waf.GetSqlInjectionMatchSetInput) (req *request.Request, output *waf.GetSqlInjectionMatchSetOutput) { + op := &request.Operation{ + Name: opGetSqlInjectionMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.GetSqlInjectionMatchSetInput{} + } + + output = &waf.GetSqlInjectionMatchSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSqlInjectionMatchSet API operation for AWS WAF Regional. +// +// Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation GetSqlInjectionMatchSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. 
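+//
+// The runtime type assertion mentioned above looks like this in practice (a
+// sketch; params is an assumed *waf.GetSqlInjectionMatchSetInput):
+//
+//    _, err := client.GetSqlInjectionMatchSet(params)
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case wafregional.ErrCodeWAFNonexistentItemException:
+//            // the referenced SqlInjectionMatchSet does not exist
+//        default:
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//    }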
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSqlInjectionMatchSet +func (c *WAFRegional) GetSqlInjectionMatchSet(input *waf.GetSqlInjectionMatchSetInput) (*waf.GetSqlInjectionMatchSetOutput, error) { + req, out := c.GetSqlInjectionMatchSetRequest(input) + return out, req.Send() +} + +// GetSqlInjectionMatchSetWithContext is the same as GetSqlInjectionMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See GetSqlInjectionMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) GetSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.GetSqlInjectionMatchSetInput, opts ...request.Option) (*waf.GetSqlInjectionMatchSetOutput, error) { + req, out := c.GetSqlInjectionMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetWebACL = "GetWebACL" + +// GetWebACLRequest generates a "aws/request.Request" representing the +// client's request for the GetWebACL operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetWebACL for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetWebACL method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetWebACLRequest method. +// req, resp := client.GetWebACLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACL +func (c *WAFRegional) GetWebACLRequest(input *waf.GetWebACLInput) (req *request.Request, output *waf.GetWebACLOutput) { + op := &request.Operation{ + Name: opGetWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.GetWebACLInput{} + } + + output = &waf.GetWebACLOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetWebACL API operation for AWS WAF Regional. +// +// Returns the WebACL that is specified by WebACLId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation GetWebACL for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. 
+// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACL +func (c *WAFRegional) GetWebACL(input *waf.GetWebACLInput) (*waf.GetWebACLOutput, error) { + req, out := c.GetWebACLRequest(input) + return out, req.Send() +} + +// GetWebACLWithContext is the same as GetWebACL with the addition of +// the ability to pass a context and additional request options. +// +// See GetWebACL for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) GetWebACLWithContext(ctx aws.Context, input *waf.GetWebACLInput, opts ...request.Option) (*waf.GetWebACLOutput, error) { + req, out := c.GetWebACLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetWebACLForResource = "GetWebACLForResource" + +// GetWebACLForResourceRequest generates a "aws/request.Request" representing the +// client's request for the GetWebACLForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetWebACLForResource for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetWebACLForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetWebACLForResourceRequest method. +// req, resp := client.GetWebACLForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResource +func (c *WAFRegional) GetWebACLForResourceRequest(input *GetWebACLForResourceInput) (req *request.Request, output *GetWebACLForResourceOutput) { + op := &request.Operation{ + Name: opGetWebACLForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetWebACLForResourceInput{} + } + + output = &GetWebACLForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetWebACLForResource API operation for AWS WAF Regional. +// +// Returns the web ACL for the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation GetWebACLForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. 
+// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFUnavailableEntityException "WAFUnavailableEntityException" +// The operation failed because the entity referenced is temporarily unavailable. +// Retry your request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResource +func (c *WAFRegional) GetWebACLForResource(input *GetWebACLForResourceInput) (*GetWebACLForResourceOutput, error) { + req, out := c.GetWebACLForResourceRequest(input) + return out, req.Send() +} + +// GetWebACLForResourceWithContext is the same as GetWebACLForResource with the addition of +// the ability to pass a context and additional request options. +// +// See GetWebACLForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) GetWebACLForResourceWithContext(ctx aws.Context, input *GetWebACLForResourceInput, opts ...request.Option) (*GetWebACLForResourceOutput, error) { + req, out := c.GetWebACLForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetXssMatchSet = "GetXssMatchSet" + +// GetXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetXssMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetXssMatchSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetXssMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetXssMatchSetRequest method. 
+// req, resp := client.GetXssMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetXssMatchSet +func (c *WAFRegional) GetXssMatchSetRequest(input *waf.GetXssMatchSetInput) (req *request.Request, output *waf.GetXssMatchSetOutput) { + op := &request.Operation{ + Name: opGetXssMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.GetXssMatchSetInput{} + } + + output = &waf.GetXssMatchSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetXssMatchSet API operation for AWS WAF Regional. +// +// Returns the XssMatchSet that is specified by XssMatchSetId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation GetXssMatchSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetXssMatchSet +func (c *WAFRegional) GetXssMatchSet(input *waf.GetXssMatchSetInput) (*waf.GetXssMatchSetOutput, error) { + req, out := c.GetXssMatchSetRequest(input) + return out, req.Send() +} + +// GetXssMatchSetWithContext is the same as GetXssMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See GetXssMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) GetXssMatchSetWithContext(ctx aws.Context, input *waf.GetXssMatchSetInput, opts ...request.Option) (*waf.GetXssMatchSetOutput, error) { + req, out := c.GetXssMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListByteMatchSets = "ListByteMatchSets" + +// ListByteMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListByteMatchSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListByteMatchSets for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListByteMatchSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
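+//
+// List results are paged via Limit and NextMarker; a hedged sketch of draining
+// every page with the plain ListByteMatchSets method (client is an assumed
+// *WAFRegional):
+//
+//    input := &waf.ListByteMatchSetsInput{Limit: aws.Int64(100)}
+//    for {
+//        page, err := client.ListByteMatchSets(input)
+//        if err != nil {
+//            break // handle the error
+//        }
+//        for _, s := range page.ByteMatchSets {
+//            fmt.Println(aws.StringValue(s.Name))
+//        }
+//        if aws.StringValue(page.NextMarker) == "" {
+//            break
+//        }
+//        input.NextMarker = page.NextMarker
+//    }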
+// +// // Example sending a request using the ListByteMatchSetsRequest method. +// req, resp := client.ListByteMatchSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListByteMatchSets +func (c *WAFRegional) ListByteMatchSetsRequest(input *waf.ListByteMatchSetsInput) (req *request.Request, output *waf.ListByteMatchSetsOutput) { + op := &request.Operation{ + Name: opListByteMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListByteMatchSetsInput{} + } + + output = &waf.ListByteMatchSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListByteMatchSets API operation for AWS WAF Regional. +// +// Returns an array of ByteMatchSetSummary objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListByteMatchSets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListByteMatchSets +func (c *WAFRegional) ListByteMatchSets(input *waf.ListByteMatchSetsInput) (*waf.ListByteMatchSetsOutput, error) { + req, out := c.ListByteMatchSetsRequest(input) + return out, req.Send() +} + +// ListByteMatchSetsWithContext is the same as ListByteMatchSets with the addition of +// the ability to pass a context and additional request options. +// +// See ListByteMatchSets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListByteMatchSetsWithContext(ctx aws.Context, input *waf.ListByteMatchSetsInput, opts ...request.Option) (*waf.ListByteMatchSetsOutput, error) { + req, out := c.ListByteMatchSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListIPSets = "ListIPSets" + +// ListIPSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListIPSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListIPSets for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIPSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
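+//
+// The custom-handler injection described above can look like this (a sketch;
+// the handler body is illustrative):
+//
+//    req, resp := client.ListIPSetsRequest(&waf.ListIPSetsInput{})
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        // runs immediately before the HTTP request is sent
+//        fmt.Println("calling", r.Operation.Name)
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp.IPSets)
+//    }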
+// +// // Example sending a request using the ListIPSetsRequest method. +// req, resp := client.ListIPSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListIPSets +func (c *WAFRegional) ListIPSetsRequest(input *waf.ListIPSetsInput) (req *request.Request, output *waf.ListIPSetsOutput) { + op := &request.Operation{ + Name: opListIPSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListIPSetsInput{} + } + + output = &waf.ListIPSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListIPSets API operation for AWS WAF Regional. +// +// Returns an array of IPSetSummary objects in the response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListIPSets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListIPSets +func (c *WAFRegional) ListIPSets(input *waf.ListIPSetsInput) (*waf.ListIPSetsOutput, error) { + req, out := c.ListIPSetsRequest(input) + return out, req.Send() +} + +// ListIPSetsWithContext is the same as ListIPSets with the addition of +// the ability to pass a context and additional request options. +// +// See ListIPSets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListIPSetsWithContext(ctx aws.Context, input *waf.ListIPSetsInput, opts ...request.Option) (*waf.ListIPSetsOutput, error) { + req, out := c.ListIPSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListResourcesForWebACL = "ListResourcesForWebACL" + +// ListResourcesForWebACLRequest generates a "aws/request.Request" representing the +// client's request for the ListResourcesForWebACL operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListResourcesForWebACL for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListResourcesForWebACL method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListResourcesForWebACLRequest method. 
+// req, resp := client.ListResourcesForWebACLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACL +func (c *WAFRegional) ListResourcesForWebACLRequest(input *ListResourcesForWebACLInput) (req *request.Request, output *ListResourcesForWebACLOutput) { + op := &request.Operation{ + Name: opListResourcesForWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListResourcesForWebACLInput{} + } + + output = &ListResourcesForWebACLOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListResourcesForWebACL API operation for AWS WAF Regional. +// +// Returns an array of resources associated with the specified web ACL. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListResourcesForWebACL for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACL +func (c *WAFRegional) ListResourcesForWebACL(input *ListResourcesForWebACLInput) (*ListResourcesForWebACLOutput, error) { + req, out := c.ListResourcesForWebACLRequest(input) + return out, req.Send() +} + +// ListResourcesForWebACLWithContext is the same as ListResourcesForWebACL with the addition of +// the ability to pass a context and additional request options. +// +// See ListResourcesForWebACL for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListResourcesForWebACLWithContext(ctx aws.Context, input *ListResourcesForWebACLInput, opts ...request.Option) (*ListResourcesForWebACLOutput, error) { + req, out := c.ListResourcesForWebACLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListRules = "ListRules" + +// ListRulesRequest generates a "aws/request.Request" representing the +// client's request for the ListRules operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListRules for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRules method directly +// instead. 
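+//
+// The ListRulesWithContext variant further below accepts an aws.Context; a
+// hedged sketch of a deadline-bounded call (a context.Context satisfies
+// aws.Context):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    out, err := client.ListRulesWithContext(ctx, &waf.ListRulesInput{Limit: aws.Int64(50)})
+//    if err == nil {
+//        fmt.Println(out.Rules)
+//    }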
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRulesRequest method. +// req, resp := client.ListRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRules +func (c *WAFRegional) ListRulesRequest(input *waf.ListRulesInput) (req *request.Request, output *waf.ListRulesOutput) { + op := &request.Operation{ + Name: opListRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListRulesInput{} + } + + output = &waf.ListRulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRules API operation for AWS WAF Regional. +// +// Returns an array of RuleSummary objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListRules for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRules +func (c *WAFRegional) ListRules(input *waf.ListRulesInput) (*waf.ListRulesOutput, error) { + req, out := c.ListRulesRequest(input) + return out, req.Send() +} + +// ListRulesWithContext is the same as ListRules with the addition of +// the ability to pass a context and additional request options. +// +// See ListRules for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListRulesWithContext(ctx aws.Context, input *waf.ListRulesInput, opts ...request.Option) (*waf.ListRulesOutput, error) { + req, out := c.ListRulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListSizeConstraintSets = "ListSizeConstraintSets" + +// ListSizeConstraintSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListSizeConstraintSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListSizeConstraintSets for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSizeConstraintSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
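+//
+// ListSizeConstraintSets returns summaries only; a hedged sketch of fetching
+// the full detail for each entry via GetSizeConstraintSet (error handling
+// elided for brevity):
+//
+//    out, _ := client.ListSizeConstraintSets(&waf.ListSizeConstraintSetsInput{})
+//    for _, s := range out.SizeConstraintSets {
+//        detail, _ := client.GetSizeConstraintSet(&waf.GetSizeConstraintSetInput{
+//            SizeConstraintSetId: s.SizeConstraintSetId,
+//        })
+//        fmt.Println(detail.SizeConstraintSet)
+//    }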
+// +// // Example sending a request using the ListSizeConstraintSetsRequest method. +// req, resp := client.ListSizeConstraintSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSizeConstraintSets +func (c *WAFRegional) ListSizeConstraintSetsRequest(input *waf.ListSizeConstraintSetsInput) (req *request.Request, output *waf.ListSizeConstraintSetsOutput) { + op := &request.Operation{ + Name: opListSizeConstraintSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListSizeConstraintSetsInput{} + } + + output = &waf.ListSizeConstraintSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSizeConstraintSets API operation for AWS WAF Regional. +// +// Returns an array of SizeConstraintSetSummary objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListSizeConstraintSets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSizeConstraintSets +func (c *WAFRegional) ListSizeConstraintSets(input *waf.ListSizeConstraintSetsInput) (*waf.ListSizeConstraintSetsOutput, error) { + req, out := c.ListSizeConstraintSetsRequest(input) + return out, req.Send() +} + +// ListSizeConstraintSetsWithContext is the same as ListSizeConstraintSets with the addition of +// the ability to pass a context and additional request options. +// +// See ListSizeConstraintSets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListSizeConstraintSetsWithContext(ctx aws.Context, input *waf.ListSizeConstraintSetsInput, opts ...request.Option) (*waf.ListSizeConstraintSetsOutput, error) { + req, out := c.ListSizeConstraintSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListSqlInjectionMatchSets = "ListSqlInjectionMatchSets" + +// ListSqlInjectionMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListSqlInjectionMatchSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListSqlInjectionMatchSets for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the ListSqlInjectionMatchSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSqlInjectionMatchSetsRequest method. +// req, resp := client.ListSqlInjectionMatchSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSqlInjectionMatchSets +func (c *WAFRegional) ListSqlInjectionMatchSetsRequest(input *waf.ListSqlInjectionMatchSetsInput) (req *request.Request, output *waf.ListSqlInjectionMatchSetsOutput) { + op := &request.Operation{ + Name: opListSqlInjectionMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListSqlInjectionMatchSetsInput{} + } + + output = &waf.ListSqlInjectionMatchSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSqlInjectionMatchSets API operation for AWS WAF Regional. +// +// Returns an array of SqlInjectionMatchSet objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListSqlInjectionMatchSets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSqlInjectionMatchSets +func (c *WAFRegional) ListSqlInjectionMatchSets(input *waf.ListSqlInjectionMatchSetsInput) (*waf.ListSqlInjectionMatchSetsOutput, error) { + req, out := c.ListSqlInjectionMatchSetsRequest(input) + return out, req.Send() +} + +// ListSqlInjectionMatchSetsWithContext is the same as ListSqlInjectionMatchSets with the addition of +// the ability to pass a context and additional request options. +// +// See ListSqlInjectionMatchSets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListSqlInjectionMatchSetsWithContext(ctx aws.Context, input *waf.ListSqlInjectionMatchSetsInput, opts ...request.Option) (*waf.ListSqlInjectionMatchSetsOutput, error) { + req, out := c.ListSqlInjectionMatchSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListWebACLs = "ListWebACLs" + +// ListWebACLsRequest generates a "aws/request.Request" representing the +// client's request for the ListWebACLs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListWebACLs for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListWebACLs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListWebACLsRequest method. +// req, resp := client.ListWebACLsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListWebACLs +func (c *WAFRegional) ListWebACLsRequest(input *waf.ListWebACLsInput) (req *request.Request, output *waf.ListWebACLsOutput) { + op := &request.Operation{ + Name: opListWebACLs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListWebACLsInput{} + } + + output = &waf.ListWebACLsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListWebACLs API operation for AWS WAF Regional. +// +// Returns an array of WebACLSummary objects in the response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListWebACLs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListWebACLs +func (c *WAFRegional) ListWebACLs(input *waf.ListWebACLsInput) (*waf.ListWebACLsOutput, error) { + req, out := c.ListWebACLsRequest(input) + return out, req.Send() +} + +// ListWebACLsWithContext is the same as ListWebACLs with the addition of +// the ability to pass a context and additional request options. +// +// See ListWebACLs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListWebACLsWithContext(ctx aws.Context, input *waf.ListWebACLsInput, opts ...request.Option) (*waf.ListWebACLsOutput, error) { + req, out := c.ListWebACLsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListXssMatchSets = "ListXssMatchSets" + +// ListXssMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListXssMatchSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListXssMatchSets for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListXssMatchSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListXssMatchSetsRequest method. +// req, resp := client.ListXssMatchSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListXssMatchSets +func (c *WAFRegional) ListXssMatchSetsRequest(input *waf.ListXssMatchSetsInput) (req *request.Request, output *waf.ListXssMatchSetsOutput) { + op := &request.Operation{ + Name: opListXssMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListXssMatchSetsInput{} + } + + output = &waf.ListXssMatchSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListXssMatchSets API operation for AWS WAF Regional. +// +// Returns an array of XssMatchSet objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListXssMatchSets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListXssMatchSets +func (c *WAFRegional) ListXssMatchSets(input *waf.ListXssMatchSetsInput) (*waf.ListXssMatchSetsOutput, error) { + req, out := c.ListXssMatchSetsRequest(input) + return out, req.Send() +} + +// ListXssMatchSetsWithContext is the same as ListXssMatchSets with the addition of +// the ability to pass a context and additional request options. +// +// See ListXssMatchSets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListXssMatchSetsWithContext(ctx aws.Context, input *waf.ListXssMatchSetsInput, opts ...request.Option) (*waf.ListXssMatchSetsOutput, error) { + req, out := c.ListXssMatchSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateByteMatchSet = "UpdateByteMatchSet" + +// UpdateByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateByteMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UpdateByteMatchSet for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateByteMatchSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateByteMatchSetRequest method. +// req, resp := client.UpdateByteMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateByteMatchSet +func (c *WAFRegional) UpdateByteMatchSetRequest(input *waf.UpdateByteMatchSetInput) (req *request.Request, output *waf.UpdateByteMatchSetOutput) { + op := &request.Operation{ + Name: opUpdateByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.UpdateByteMatchSetInput{} + } + + output = &waf.UpdateByteMatchSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateByteMatchSet API operation for AWS WAF Regional. +// +// Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For +// each ByteMatchTuple object, you specify the following values: +// +// * Whether to insert or delete the object from the array. If you want to +// change a ByteMatchSetUpdate object, you delete the existing object and +// add a new one. +// +// * The part of a web request that you want AWS WAF to inspect, such as +// a query string or the value of the User-Agent header. +// +// * The bytes (typically a string that corresponds with ASCII characters) +// that you want AWS WAF to look for. For more information, including how +// you specify the values for the AWS WAF API and the AWS CLI or SDKs, see +// TargetString in the ByteMatchTuple data type. +// +// * Where to look, such as at the beginning or the end of a query string. +// +// * Whether to perform any conversions on the request, such as converting +// it to lowercase, before inspecting it for the specified string. +// +// For example, you can add a ByteMatchSetUpdate object that matches web requests +// in which User-Agent headers contain the string BadBot. You can then configure +// AWS WAF to block those requests. +// +// To create and configure a ByteMatchSet, perform the following steps: +// +// Create a ByteMatchSet. For more information, see CreateByteMatchSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateByteMatchSet request. +// +// Submit an UpdateByteMatchSet request to specify the part of the request that +// you want AWS WAF to inspect (for example, the header or the URI) and the +// value that you want AWS WAF to watch for. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation UpdateByteMatchSet for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. 
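+//
+// A hedged sketch of the change-token steps described above, inserting one
+// BadBot tuple (the ByteMatchSet ID is a placeholder; the string values follow
+// the enum names in the API reference):
+//
+//    token, err := client.GetChangeToken(&waf.GetChangeTokenInput{})
+//    if err != nil {
+//        // handle error
+//    }
+//    _, err = client.UpdateByteMatchSet(&waf.UpdateByteMatchSetInput{
+//        ByteMatchSetId: aws.String("example-byte-match-set-id"), // placeholder
+//        ChangeToken:    token.ChangeToken,
+//        Updates: []*waf.ByteMatchSetUpdate{{
+//            Action: aws.String("INSERT"),
+//            ByteMatchTuple: &waf.ByteMatchTuple{
+//                FieldToMatch:         &waf.FieldToMatch{Type: aws.String("HEADER"), Data: aws.String("User-Agent")},
+//                TargetString:         []byte("BadBot"),
+//                PositionalConstraint: aws.String("CONTAINS"),
+//                TextTransformation:   aws.String("NONE"),
+//            },
+//        }},
+//    })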
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateByteMatchSet +func (c *WAFRegional) UpdateByteMatchSet(input *waf.UpdateByteMatchSetInput) (*waf.UpdateByteMatchSetOutput, error) { + req, out := c.UpdateByteMatchSetRequest(input) + return out, req.Send() +} + +// UpdateByteMatchSetWithContext is the same as UpdateByteMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateByteMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) UpdateByteMatchSetWithContext(ctx aws.Context, input *waf.UpdateByteMatchSetInput, opts ...request.Option) (*waf.UpdateByteMatchSetOutput, error) { + req, out := c.UpdateByteMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateIPSet = "UpdateIPSet" + +// UpdateIPSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateIPSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UpdateIPSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateIPSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateIPSetRequest method. +// req, resp := client.UpdateIPSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateIPSet +func (c *WAFRegional) UpdateIPSetRequest(input *waf.UpdateIPSetInput) (req *request.Request, output *waf.UpdateIPSetOutput) { + op := &request.Operation{ + Name: opUpdateIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.UpdateIPSetInput{} + } + + output = &waf.UpdateIPSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateIPSet API operation for AWS WAF Regional. +// +// Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor +// object, you specify the following values: +// +// * Whether to insert or delete the object from the array. If you want to +// change an IPSetDescriptor object, you delete the existing object and add +// a new one. +// +// * The IP address version, IPv4 or IPv6. +// +// * The IP address in CIDR notation, for example, 192.0.2.0/24 (for the +// range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 +// (for the individual IP address 192.0.2.44). +// +// AWS WAF supports /8, /16, /24, and /32 IP address ranges for IPv4, and /24, +// /32, /48, /56, /64 and /128 for IPv6. For more information about CIDR notation, +// see the Wikipedia entry Classless Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). 
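+//
+// As a hedged sketch, inserting the /32 example above into an existing IPSet
+// (the IPSet ID is a placeholder; token comes from a prior GetChangeToken call):
+//
+//    _, err := client.UpdateIPSet(&waf.UpdateIPSetInput{
+//        IPSetId:     aws.String("example-ip-set-id"), // placeholder
+//        ChangeToken: token.ChangeToken,
+//        Updates: []*waf.IPSetUpdate{{
+//            Action: aws.String("INSERT"),
+//            IPSetDescriptor: &waf.IPSetDescriptor{
+//                Type:  aws.String("IPV4"),
+//                Value: aws.String("192.0.2.44/32"),
+//            },
+//        }},
+//    })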
+// +// IPv6 addresses can be represented using any of the following formats: +// +// * 1111:0000:0000:0000:0000:0000:0000:0111/128 +// +// * 1111:0:0:0:0:0:0:0111/128 +// +// * 1111::0111/128 +// +// * 1111::111/128 +// +// You use an IPSet to specify which web requests you want to allow or block +// based on the IP addresses that the requests originated from. For example, +// if you're receiving a lot of requests from one or a small number of IP addresses +// and you want to block the requests, you can create an IPSet that specifies +// those IP addresses, and then configure AWS WAF to block the requests. +// +// To create and configure an IPSet, perform the following steps: +// +// Submit a CreateIPSet request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateIPSet request. +// +// Submit an UpdateIPSet request to specify the IP addresses that you want AWS +// WAF to watch for. +// +// When you update an IPSet, you specify the IP addresses that you want to add +// and/or the IP addresses that you want to delete. If you want to change an +// IP address, you delete the existing IP address and add the new one. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation UpdateIPSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. 
+// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateIPSet +func (c *WAFRegional) UpdateIPSet(input *waf.UpdateIPSetInput) (*waf.UpdateIPSetOutput, error) { + req, out := c.UpdateIPSetRequest(input) + return out, req.Send() +} + +// UpdateIPSetWithContext is the same as UpdateIPSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateIPSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) UpdateIPSetWithContext(ctx aws.Context, input *waf.UpdateIPSetInput, opts ...request.Option) (*waf.UpdateIPSetOutput, error) { + req, out := c.UpdateIPSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateRule = "UpdateRule" + +// UpdateRuleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UpdateRule for usage and error information. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateRule method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the UpdateRuleRequest method.
+//    req, resp := client.UpdateRuleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRule
+func (c *WAFRegional) UpdateRuleRequest(input *waf.UpdateRuleInput) (req *request.Request, output *waf.UpdateRuleOutput) {
+	op := &request.Operation{
+		Name:       opUpdateRule,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &waf.UpdateRuleInput{}
+	}
+
+	output = &waf.UpdateRuleOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateRule API operation for AWS WAF Regional.
+//
+// Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies
+// a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests
+// that you want to allow, block, or count. If you add more than one predicate
+// to a Rule, a request must match all of the specifications to be allowed,
+// blocked, or counted. For example, suppose you add the following to a Rule:
+//
+//    * A ByteMatchSet that matches the value BadBot in the User-Agent header
+//
+//    * An IPSet that matches the IP address 192.0.2.44
+//
+// You then add the Rule to a WebACL and specify that you want to block requests
+// that satisfy the Rule. For a request to be blocked, the User-Agent header
+// in the request must contain the value BadBot and the request must originate
+// from the IP address 192.0.2.44.
+//
+// To create and configure a Rule, perform the following steps:
+//
+// Create and update the predicates that you want to include in the Rule.
+//
+// Create the Rule. See CreateRule.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateRule request.
+//
+// Submit an UpdateRule request to add predicates to the Rule.
+//
+// Create and update a WebACL that contains the Rule. See CreateWebACL.
+//
+// If you want to replace one ByteMatchSet or IPSet with another, you delete
+// the existing one and add the new one.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF Regional's
+// API operation UpdateRule for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeWAFStaleDataException "WAFStaleDataException"
+//   The operation failed because you tried to create, update, or delete an object
+//   by using a change token that has already been used.
+//
+//   * ErrCodeWAFInternalErrorException "WAFInternalErrorException"
+//   The operation failed because of a system problem, even though the request
+//   was valid. Retry your request.
+// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. 
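+//
+// The following sketch is an editorial illustration, not part of the generated
+// documentation. It shows one plausible way to drive the change-token workflow
+// described above; it assumes svc is an already-constructed *WAFRegional client,
+// and the Rule and IPSet IDs are placeholders.
+//
+//    // Fetch a change token, then insert an IPSet-based predicate into the Rule.
+//    token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//    if err != nil {
+//        // handle error
+//    }
+//    _, err = svc.UpdateRule(&waf.UpdateRuleInput{
+//        ChangeToken: token.ChangeToken,
+//        RuleId:      aws.String("example-rule-id"), // placeholder
+//        Updates: []*waf.RuleUpdate{{
+//            Action: aws.String(waf.ChangeActionInsert),
+//            Predicate: &waf.Predicate{
+//                DataId:  aws.String("example-ipset-id"), // placeholder
+//                Negated: aws.Bool(false),
+//                Type:    aws.String(waf.PredicateTypeIpmatch),
+//            },
+//        }},
+//    })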
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRule +func (c *WAFRegional) UpdateRule(input *waf.UpdateRuleInput) (*waf.UpdateRuleOutput, error) { + req, out := c.UpdateRuleRequest(input) + return out, req.Send() +} + +// UpdateRuleWithContext is the same as UpdateRule with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) UpdateRuleWithContext(ctx aws.Context, input *waf.UpdateRuleInput, opts ...request.Option) (*waf.UpdateRuleOutput, error) { + req, out := c.UpdateRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSizeConstraintSet = "UpdateSizeConstraintSet" + +// UpdateSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSizeConstraintSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UpdateSizeConstraintSet for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateSizeConstraintSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateSizeConstraintSetRequest method. +// req, resp := client.UpdateSizeConstraintSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSizeConstraintSet +func (c *WAFRegional) UpdateSizeConstraintSetRequest(input *waf.UpdateSizeConstraintSetInput) (req *request.Request, output *waf.UpdateSizeConstraintSetOutput) { + op := &request.Operation{ + Name: opUpdateSizeConstraintSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.UpdateSizeConstraintSetInput{} + } + + output = &waf.UpdateSizeConstraintSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSizeConstraintSet API operation for AWS WAF Regional. +// +// Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. +// For each SizeConstraint object, you specify the following values: +// +// * Whether to insert or delete the object from the array. If you want to +// change a SizeConstraintSetUpdate object, you delete the existing object +// and add a new one. +// +// * The part of a web request that you want AWS WAF to evaluate, such as +// the length of a query string or the length of the User-Agent header. +// +// * Whether to perform any transformations on the request, such as converting +// it to lowercase, before checking its length. Note that transformations +// of the request body are not supported because the AWS resource forwards +// only the first 8192 bytes of your request to AWS WAF. 
+//
+//    * A ComparisonOperator used for evaluating the selected part of the request
+//    against the specified Size, such as equals, greater than, less than, and
+//    so on.
+//
+//    * The length, in bytes, that you want AWS WAF to watch for in the selected
+//    part of the request. The length is computed after applying the transformation.
+//
+// For example, you can add a SizeConstraintSetUpdate object that matches web
+// requests in which the length of the User-Agent header is greater than 100
+// bytes. You can then configure AWS WAF to block those requests.
+//
+// To create and configure a SizeConstraintSet, perform the following steps:
+//
+// Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateSizeConstraintSet request.
+//
+// Submit an UpdateSizeConstraintSet request to specify the part of the request
+// that you want AWS WAF to inspect (for example, the header or the URI) and
+// the value that you want AWS WAF to watch for.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF Regional's
+// API operation UpdateSizeConstraintSet for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeWAFStaleDataException "WAFStaleDataException"
+//   The operation failed because you tried to create, update, or delete an object
+//   by using a change token that has already been used.
+//
+//   * ErrCodeWAFInternalErrorException "WAFInternalErrorException"
+//   The operation failed because of a system problem, even though the request
+//   was valid. Retry your request.
+//
+//   * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException"
+//   The operation failed because you tried to create, update, or delete an object
+//   by using an invalid account identifier.
+//
+//   * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException"
+//   The operation failed because there was nothing to do. For example:
+//
+//      * You tried to remove a Rule from a WebACL, but the Rule isn't in the
+//      specified WebACL.
+//
+//      * You tried to remove an IP address from an IPSet, but the IP address
+//      isn't in the specified IPSet.
+//
+//      * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple
+//      isn't in the specified WebACL.
+//
+//      * You tried to add a Rule to a WebACL, but the Rule already exists in
+//      the specified WebACL.
+//
+//      * You tried to add an IP address to an IPSet, but the IP address already
+//      exists in the specified IPSet.
+//
+//      * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple
+//      already exists in the specified WebACL.
+//
+//   * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException"
+//   The operation failed because AWS WAF didn't recognize a parameter in the
+//   request. For example:
+//
+//      * You specified an invalid parameter name.
+//
+//      * You specified an invalid value.
+//
+//      * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL)
+//      using an action other than INSERT or DELETE.
+//
+//      * You tried to create a WebACL with a DefaultActionType other than ALLOW,
+//      BLOCK, or COUNT.
+// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSizeConstraintSet +func (c *WAFRegional) UpdateSizeConstraintSet(input *waf.UpdateSizeConstraintSetInput) (*waf.UpdateSizeConstraintSetOutput, error) { + req, out := c.UpdateSizeConstraintSetRequest(input) + return out, req.Send() +} + +// UpdateSizeConstraintSetWithContext is the same as UpdateSizeConstraintSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSizeConstraintSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) UpdateSizeConstraintSetWithContext(ctx aws.Context, input *waf.UpdateSizeConstraintSetInput, opts ...request.Option) (*waf.UpdateSizeConstraintSetOutput, error) { + req, out := c.UpdateSizeConstraintSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSqlInjectionMatchSet = "UpdateSqlInjectionMatchSet" + +// UpdateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSqlInjectionMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UpdateSqlInjectionMatchSet for usage and error information. 
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateSqlInjectionMatchSet method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the UpdateSqlInjectionMatchSetRequest method.
+//    req, resp := client.UpdateSqlInjectionMatchSetRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSqlInjectionMatchSet
+func (c *WAFRegional) UpdateSqlInjectionMatchSetRequest(input *waf.UpdateSqlInjectionMatchSetInput) (req *request.Request, output *waf.UpdateSqlInjectionMatchSetOutput) {
+	op := &request.Operation{
+		Name:       opUpdateSqlInjectionMatchSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &waf.UpdateSqlInjectionMatchSetInput{}
+	}
+
+	output = &waf.UpdateSqlInjectionMatchSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateSqlInjectionMatchSet API operation for AWS WAF Regional.
+//
+// Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet.
+// For each SqlInjectionMatchTuple object, you specify the following values:
+//
+//    * Action: Whether to insert the object into or delete the object from
+//    the array. To change a SqlInjectionMatchTuple, you delete the existing
+//    object and add a new one.
+//
+//    * FieldToMatch: The part of web requests that you want AWS WAF to inspect
+//    and, if you want AWS WAF to inspect a header, the name of the header.
+//
+//    * TextTransformation: Which text transformation, if any, to perform on
+//    the web request before inspecting the request for snippets of malicious
+//    SQL code.
+//
+// You use SqlInjectionMatchSet objects to specify which CloudFront requests
+// you want to allow, block, or count. For example, if you're receiving requests
+// that contain snippets of SQL code in the query string and you want to block
+// the requests, you can create a SqlInjectionMatchSet with the applicable settings,
+// and then configure AWS WAF to block the requests.
+//
+// To create and configure a SqlInjectionMatchSet, perform the following steps:
+//
+// Submit a CreateSqlInjectionMatchSet request.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateSqlInjectionMatchSet request.
+//
+// Submit an UpdateSqlInjectionMatchSet request to specify the parts of web
+// requests that you want AWS WAF to inspect for snippets of SQL code.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF Regional's
+// API operation UpdateSqlInjectionMatchSet for usage and error information.
+// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. 
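+//
+// The following sketch is an editorial illustration, not part of the generated
+// documentation. It shows one plausible shape for the request described above;
+// it assumes svc is an existing *WAFRegional client, token was obtained from a
+// prior GetChangeToken call, and the match set ID is a placeholder.
+//
+//    // Inspect the URL-decoded query string for snippets of SQL code.
+//    _, err := svc.UpdateSqlInjectionMatchSet(&waf.UpdateSqlInjectionMatchSetInput{
+//        ChangeToken:            token.ChangeToken,
+//        SqlInjectionMatchSetId: aws.String("example-match-set-id"), // placeholder
+//        Updates: []*waf.SqlInjectionMatchSetUpdate{{
+//            Action: aws.String(waf.ChangeActionInsert),
+//            SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{
+//                FieldToMatch: &waf.FieldToMatch{
+//                    Type: aws.String(waf.MatchFieldTypeQueryString),
+//                },
+//                TextTransformation: aws.String(waf.TextTransformationUrlDecode),
+//            },
+//        }},
+//    })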
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSqlInjectionMatchSet +func (c *WAFRegional) UpdateSqlInjectionMatchSet(input *waf.UpdateSqlInjectionMatchSetInput) (*waf.UpdateSqlInjectionMatchSetOutput, error) { + req, out := c.UpdateSqlInjectionMatchSetRequest(input) + return out, req.Send() +} + +// UpdateSqlInjectionMatchSetWithContext is the same as UpdateSqlInjectionMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSqlInjectionMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) UpdateSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.UpdateSqlInjectionMatchSetInput, opts ...request.Option) (*waf.UpdateSqlInjectionMatchSetOutput, error) { + req, out := c.UpdateSqlInjectionMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateWebACL = "UpdateWebACL" + +// UpdateWebACLRequest generates a "aws/request.Request" representing the +// client's request for the UpdateWebACL operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UpdateWebACL for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateWebACL method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateWebACLRequest method. +// req, resp := client.UpdateWebACLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateWebACL +func (c *WAFRegional) UpdateWebACLRequest(input *waf.UpdateWebACLInput) (req *request.Request, output *waf.UpdateWebACLOutput) { + op := &request.Operation{ + Name: opUpdateWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.UpdateWebACLInput{} + } + + output = &waf.UpdateWebACLOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateWebACL API operation for AWS WAF Regional. +// +// Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies +// web requests that you want to allow, block, or count. When you update a WebACL, +// you specify the following values: +// +// * A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs +// the default action if a request doesn't match the criteria in any of the +// Rules in a WebACL. +// +// * The Rules that you want to add and/or delete. If you want to replace +// one Rule with another, you delete the existing Rule and add the new one. +// +// * For each Rule, whether you want AWS WAF to allow requests, block requests, +// or count requests that match the conditions in the Rule. +// +// * The order in which you want AWS WAF to evaluate the Rules in a WebACL. 
+// If you add more than one Rule to a WebACL, AWS WAF evaluates each request +// against the Rules in order based on the value of Priority. (The Rule that +// has the lowest value for Priority is evaluated first.) When a web request +// matches all of the predicates (such as ByteMatchSets and IPSets) in a +// Rule, AWS WAF immediately takes the corresponding action, allow or block, +// and doesn't evaluate the request against the remaining Rules in the WebACL, +// if any. +// +// To create and configure a WebACL, perform the following steps: +// +// Create and update the predicates that you want to include in Rules. For more +// information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, +// CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// +// Create and update the Rules that you want to include in the WebACL. For more +// information, see CreateRule and UpdateRule. +// +// Create a WebACL. See CreateWebACL. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateWebACL request. +// +// Submit an UpdateWebACL request to specify the Rules that you want to include +// in the WebACL, to specify the default action, and to associate the WebACL +// with a CloudFront distribution. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation UpdateWebACL for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. 
+// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateWebACL +func (c *WAFRegional) UpdateWebACL(input *waf.UpdateWebACLInput) (*waf.UpdateWebACLOutput, error) { + req, out := c.UpdateWebACLRequest(input) + return out, req.Send() +} + +// UpdateWebACLWithContext is the same as UpdateWebACL with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateWebACL for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) UpdateWebACLWithContext(ctx aws.Context, input *waf.UpdateWebACLInput, opts ...request.Option) (*waf.UpdateWebACLOutput, error) { + req, out := c.UpdateWebACLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateXssMatchSet = "UpdateXssMatchSet" + +// UpdateXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateXssMatchSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+//
+// See UpdateXssMatchSet for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateXssMatchSet method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the UpdateXssMatchSetRequest method.
+//    req, resp := client.UpdateXssMatchSetRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateXssMatchSet
+func (c *WAFRegional) UpdateXssMatchSetRequest(input *waf.UpdateXssMatchSetInput) (req *request.Request, output *waf.UpdateXssMatchSetOutput) {
+	op := &request.Operation{
+		Name:       opUpdateXssMatchSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &waf.UpdateXssMatchSetInput{}
+	}
+
+	output = &waf.UpdateXssMatchSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateXssMatchSet API operation for AWS WAF Regional.
+//
+// Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For
+// each XssMatchTuple object, you specify the following values:
+//
+//    * Action: Whether to insert the object into or delete the object from
+//    the array. To change an XssMatchTuple, you delete the existing object and
+//    add a new one.
+//
+//    * FieldToMatch: The part of web requests that you want AWS WAF to inspect
+//    and, if you want AWS WAF to inspect a header, the name of the header.
+//
+//    * TextTransformation: Which text transformation, if any, to perform on
+//    the web request before inspecting the request for cross-site scripting
+//    attacks.
+//
+// You use XssMatchSet objects to specify which CloudFront requests you want
+// to allow, block, or count. For example, if you're receiving requests that
+// contain cross-site scripting attacks in the request body and you want to
+// block the requests, you can create an XssMatchSet with the applicable settings,
+// and then configure AWS WAF to block the requests.
+//
+// To create and configure an XssMatchSet, perform the following steps:
+//
+// Submit a CreateXssMatchSet request.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateXssMatchSet request.
+//
+// Submit an UpdateXssMatchSet request to specify the parts of web requests
+// that you want AWS WAF to inspect for cross-site scripting attacks.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF Regional's
+// API operation UpdateXssMatchSet for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeWAFInternalErrorException "WAFInternalErrorException"
+//   The operation failed because of a system problem, even though the request
+//   was valid. Retry your request.
+// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, QUERY_STRING, or URI. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. 
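+//
+// The following sketch is an editorial illustration, not part of the generated
+// documentation. It shows one plausible shape for the request described above;
+// it assumes svc is an existing *WAFRegional client, token was obtained from a
+// prior GetChangeToken call, and the match set ID is a placeholder.
+//
+//    // Inspect the HTML-entity-decoded request body for cross-site scripting.
+//    _, err := svc.UpdateXssMatchSet(&waf.UpdateXssMatchSetInput{
+//        ChangeToken:   token.ChangeToken,
+//        XssMatchSetId: aws.String("example-match-set-id"), // placeholder
+//        Updates: []*waf.XssMatchSetUpdate{{
+//            Action: aws.String(waf.ChangeActionInsert),
+//            XssMatchTuple: &waf.XssMatchTuple{
+//                FieldToMatch: &waf.FieldToMatch{
+//                    Type: aws.String(waf.MatchFieldTypeBody),
+//                },
+//                TextTransformation: aws.String(waf.TextTransformationHtmlEntityDecode),
+//            },
+//        }},
+//    })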
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateXssMatchSet +func (c *WAFRegional) UpdateXssMatchSet(input *waf.UpdateXssMatchSetInput) (*waf.UpdateXssMatchSetOutput, error) { + req, out := c.UpdateXssMatchSetRequest(input) + return out, req.Send() +} + +// UpdateXssMatchSetWithContext is the same as UpdateXssMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateXssMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) UpdateXssMatchSetWithContext(ctx aws.Context, input *waf.UpdateXssMatchSetInput, opts ...request.Option) (*waf.UpdateXssMatchSetOutput, error) { + req, out := c.UpdateXssMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/AssociateWebACLRequest +type AssociateWebACLInput struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) of the resource to be protected. + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` + + // A unique identifier (ID) for the web ACL. + // + // WebACLId is a required field + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateWebACLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateWebACLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateWebACLInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) + } + if s.WebACLId != nil && len(*s.WebACLId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *AssociateWebACLInput) SetResourceArn(v string) *AssociateWebACLInput { + s.ResourceArn = &v + return s +} + +// SetWebACLId sets the WebACLId field's value. 
+func (s *AssociateWebACLInput) SetWebACLId(v string) *AssociateWebACLInput { + s.WebACLId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/AssociateWebACLResponse +type AssociateWebACLOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateWebACLOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACLRequest +type DisassociateWebACLInput struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) of the resource from which the web ACL is + // being removed. + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateWebACLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateWebACLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateWebACLInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *DisassociateWebACLInput) SetResourceArn(v string) *DisassociateWebACLInput { + s.ResourceArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACLResponse +type DisassociateWebACLOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateWebACLOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResourceRequest +type GetWebACLForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) of the resource for which to get the web ACL. + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetWebACLForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetWebACLForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetWebACLForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetWebACLForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. 
+func (s *GetWebACLForResourceInput) SetResourceArn(v string) *GetWebACLForResourceInput { + s.ResourceArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResourceResponse +type GetWebACLForResourceOutput struct { + _ struct{} `type:"structure"` + + // Information about the web ACL that you specified in the GetWebACLForResource + // request. If there is no associated resource, a null WebACLSummary is returned. + WebACLSummary *waf.WebACLSummary `type:"structure"` +} + +// String returns the string representation +func (s GetWebACLForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetWebACLForResourceOutput) GoString() string { + return s.String() +} + +// SetWebACLSummary sets the WebACLSummary field's value. +func (s *GetWebACLForResourceOutput) SetWebACLSummary(v *waf.WebACLSummary) *GetWebACLForResourceOutput { + s.WebACLSummary = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACLRequest +type ListResourcesForWebACLInput struct { + _ struct{} `type:"structure"` + + // The unique identifier (ID) of the web ACL for which to list the associated + // resources. + // + // WebACLId is a required field + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListResourcesForWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourcesForWebACLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListResourcesForWebACLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResourcesForWebACLInput"} + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) + } + if s.WebACLId != nil && len(*s.WebACLId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWebACLId sets the WebACLId field's value. +func (s *ListResourcesForWebACLInput) SetWebACLId(v string) *ListResourcesForWebACLInput { + s.WebACLId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACLResponse +type ListResourcesForWebACLOutput struct { + _ struct{} `type:"structure"` + + // An array of ARNs (Amazon Resource Names) of the resources associated with + // the specified web ACL. An array with zero elements is returned if there are + // no resources associated with the web ACL. + ResourceArns []*string `type:"list"` +} + +// String returns the string representation +func (s ListResourcesForWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourcesForWebACLOutput) GoString() string { + return s.String() +} + +// SetResourceArns sets the ResourceArns field's value. 
+func (s *ListResourcesForWebACLOutput) SetResourceArns(v []*string) *ListResourcesForWebACLOutput { + s.ResourceArns = v + return s +} + +const ( + // ChangeActionInsert is a ChangeAction enum value + ChangeActionInsert = "INSERT" + + // ChangeActionDelete is a ChangeAction enum value + ChangeActionDelete = "DELETE" +) + +const ( + // ChangeTokenStatusProvisioned is a ChangeTokenStatus enum value + ChangeTokenStatusProvisioned = "PROVISIONED" + + // ChangeTokenStatusPending is a ChangeTokenStatus enum value + ChangeTokenStatusPending = "PENDING" + + // ChangeTokenStatusInsync is a ChangeTokenStatus enum value + ChangeTokenStatusInsync = "INSYNC" +) + +const ( + // ComparisonOperatorEq is a ComparisonOperator enum value + ComparisonOperatorEq = "EQ" + + // ComparisonOperatorNe is a ComparisonOperator enum value + ComparisonOperatorNe = "NE" + + // ComparisonOperatorLe is a ComparisonOperator enum value + ComparisonOperatorLe = "LE" + + // ComparisonOperatorLt is a ComparisonOperator enum value + ComparisonOperatorLt = "LT" + + // ComparisonOperatorGe is a ComparisonOperator enum value + ComparisonOperatorGe = "GE" + + // ComparisonOperatorGt is a ComparisonOperator enum value + ComparisonOperatorGt = "GT" +) + +const ( + // IPSetDescriptorTypeIpv4 is a IPSetDescriptorType enum value + IPSetDescriptorTypeIpv4 = "IPV4" + + // IPSetDescriptorTypeIpv6 is a IPSetDescriptorType enum value + IPSetDescriptorTypeIpv6 = "IPV6" +) + +const ( + // MatchFieldTypeUri is a MatchFieldType enum value + MatchFieldTypeUri = "URI" + + // MatchFieldTypeQueryString is a MatchFieldType enum value + MatchFieldTypeQueryString = "QUERY_STRING" + + // MatchFieldTypeHeader is a MatchFieldType enum value + MatchFieldTypeHeader = "HEADER" + + // MatchFieldTypeMethod is a MatchFieldType enum value + MatchFieldTypeMethod = "METHOD" + + // MatchFieldTypeBody is a MatchFieldType enum value + MatchFieldTypeBody = "BODY" +) + +const ( + // ParameterExceptionFieldChangeAction is a ParameterExceptionField enum value + ParameterExceptionFieldChangeAction = "CHANGE_ACTION" + + // ParameterExceptionFieldWafAction is a ParameterExceptionField enum value + ParameterExceptionFieldWafAction = "WAF_ACTION" + + // ParameterExceptionFieldPredicateType is a ParameterExceptionField enum value + ParameterExceptionFieldPredicateType = "PREDICATE_TYPE" + + // ParameterExceptionFieldIpsetType is a ParameterExceptionField enum value + ParameterExceptionFieldIpsetType = "IPSET_TYPE" + + // ParameterExceptionFieldByteMatchFieldType is a ParameterExceptionField enum value + ParameterExceptionFieldByteMatchFieldType = "BYTE_MATCH_FIELD_TYPE" + + // ParameterExceptionFieldSqlInjectionMatchFieldType is a ParameterExceptionField enum value + ParameterExceptionFieldSqlInjectionMatchFieldType = "SQL_INJECTION_MATCH_FIELD_TYPE" + + // ParameterExceptionFieldByteMatchTextTransformation is a ParameterExceptionField enum value + ParameterExceptionFieldByteMatchTextTransformation = "BYTE_MATCH_TEXT_TRANSFORMATION" + + // ParameterExceptionFieldByteMatchPositionalConstraint is a ParameterExceptionField enum value + ParameterExceptionFieldByteMatchPositionalConstraint = "BYTE_MATCH_POSITIONAL_CONSTRAINT" + + // ParameterExceptionFieldSizeConstraintComparisonOperator is a ParameterExceptionField enum value + ParameterExceptionFieldSizeConstraintComparisonOperator = "SIZE_CONSTRAINT_COMPARISON_OPERATOR" +) + +const ( + // ParameterExceptionReasonInvalidOption is a ParameterExceptionReason enum value + ParameterExceptionReasonInvalidOption = "INVALID_OPTION" + + 
// ParameterExceptionReasonIllegalCombination is a ParameterExceptionReason enum value + ParameterExceptionReasonIllegalCombination = "ILLEGAL_COMBINATION" +) + +const ( + // PositionalConstraintExactly is a PositionalConstraint enum value + PositionalConstraintExactly = "EXACTLY" + + // PositionalConstraintStartsWith is a PositionalConstraint enum value + PositionalConstraintStartsWith = "STARTS_WITH" + + // PositionalConstraintEndsWith is a PositionalConstraint enum value + PositionalConstraintEndsWith = "ENDS_WITH" + + // PositionalConstraintContains is a PositionalConstraint enum value + PositionalConstraintContains = "CONTAINS" + + // PositionalConstraintContainsWord is a PositionalConstraint enum value + PositionalConstraintContainsWord = "CONTAINS_WORD" +) + +const ( + // PredicateTypeIpmatch is a PredicateType enum value + PredicateTypeIpmatch = "IPMatch" + + // PredicateTypeByteMatch is a PredicateType enum value + PredicateTypeByteMatch = "ByteMatch" + + // PredicateTypeSqlInjectionMatch is a PredicateType enum value + PredicateTypeSqlInjectionMatch = "SqlInjectionMatch" + + // PredicateTypeSizeConstraint is a PredicateType enum value + PredicateTypeSizeConstraint = "SizeConstraint" + + // PredicateTypeXssMatch is a PredicateType enum value + PredicateTypeXssMatch = "XssMatch" +) + +const ( + // TextTransformationNone is a TextTransformation enum value + TextTransformationNone = "NONE" + + // TextTransformationCompressWhiteSpace is a TextTransformation enum value + TextTransformationCompressWhiteSpace = "COMPRESS_WHITE_SPACE" + + // TextTransformationHtmlEntityDecode is a TextTransformation enum value + TextTransformationHtmlEntityDecode = "HTML_ENTITY_DECODE" + + // TextTransformationLowercase is a TextTransformation enum value + TextTransformationLowercase = "LOWERCASE" + + // TextTransformationCmdLine is a TextTransformation enum value + TextTransformationCmdLine = "CMD_LINE" + + // TextTransformationUrlDecode is a TextTransformation enum value + TextTransformationUrlDecode = "URL_DECODE" +) + +const ( + // WafActionTypeBlock is a WafActionType enum value + WafActionTypeBlock = "BLOCK" + + // WafActionTypeAllow is a WafActionType enum value + WafActionTypeAllow = "ALLOW" + + // WafActionTypeCount is a WafActionType enum value + WafActionTypeCount = "COUNT" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go b/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go new file mode 100644 index 000000000..77000526a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go @@ -0,0 +1,155 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package wafregional + +const ( + + // ErrCodeWAFDisallowedNameException for service response error code + // "WAFDisallowedNameException". + // + // The name specified is invalid. + ErrCodeWAFDisallowedNameException = "WAFDisallowedNameException" + + // ErrCodeWAFInternalErrorException for service response error code + // "WAFInternalErrorException". + // + // The operation failed because of a system problem, even though the request + // was valid. Retry your request. + ErrCodeWAFInternalErrorException = "WAFInternalErrorException" + + // ErrCodeWAFInvalidAccountException for service response error code + // "WAFInvalidAccountException". + // + // The operation failed because you tried to create, update, or delete an object + // by using an invalid account identifier. 
+ ErrCodeWAFInvalidAccountException = "WAFInvalidAccountException" + + // ErrCodeWAFInvalidOperationException for service response error code + // "WAFInvalidOperationException". + // + // The operation failed because there was nothing to do. For example: + // + // * You tried to remove a Rule from a WebACL, but the Rule isn't in the + // specified WebACL. + // + // * You tried to remove an IP address from an IPSet, but the IP address + // isn't in the specified IPSet. + // + // * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple + // isn't in the specified WebACL. + // + // * You tried to add a Rule to a WebACL, but the Rule already exists in + // the specified WebACL. + // + // * You tried to add an IP address to an IPSet, but the IP address already + // exists in the specified IPSet. + // + // * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple + // already exists in the specified WebACL. + ErrCodeWAFInvalidOperationException = "WAFInvalidOperationException" + + // ErrCodeWAFInvalidParameterException for service response error code + // "WAFInvalidParameterException". + // + // The operation failed because AWS WAF didn't recognize a parameter in the + // request. For example: + // + // * You specified an invalid parameter name. + // + // * You specified an invalid value. + // + // * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) + // using an action other than INSERT or DELETE. + // + // * You tried to create a WebACL with a DefaultActionType other than ALLOW, + // BLOCK, or COUNT. + // + // * You tried to update a WebACL with a WafActionType other than ALLOW, + // BLOCK, or COUNT. + // + // * You tried to update a ByteMatchSet with a FieldToMatchType other than + // HEADER, QUERY_STRING, or URI. + // + // * You tried to update a ByteMatchSet with a Field of HEADER but no value + // for Data. + // + // * Your request references an ARN that is malformed, or corresponds to + // a resource with which a web ACL cannot be associated. + ErrCodeWAFInvalidParameterException = "WAFInvalidParameterException" + + // ErrCodeWAFLimitsExceededException for service response error code + // "WAFLimitsExceededException". + // + // The operation exceeds a resource limit, for example, the maximum number of + // WebACL objects that you can create for an AWS account. For more information, + // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) + // in the AWS WAF Developer Guide. + ErrCodeWAFLimitsExceededException = "WAFLimitsExceededException" + + // ErrCodeWAFNonEmptyEntityException for service response error code + // "WAFNonEmptyEntityException". + // + // The operation failed because you tried to delete an object that isn't empty. + // For example: + // + // * You tried to delete a WebACL that still contains one or more Rule objects. + // + // * You tried to delete a Rule that still contains one or more ByteMatchSet + // objects or other predicates. + // + // * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple + // objects. + // + // * You tried to delete an IPSet that references one or more IP addresses. + ErrCodeWAFNonEmptyEntityException = "WAFNonEmptyEntityException" + + // ErrCodeWAFNonexistentContainerException for service response error code + // "WAFNonexistentContainerException". + // + // The operation failed because you tried to add an object to or delete an object + // from another object that doesn't exist. 
For example: + // + // * You tried to add a Rule to or delete a Rule from a WebACL that doesn't + // exist. + // + // * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule + // that doesn't exist. + // + // * You tried to add an IP address to or delete an IP address from an IPSet + // that doesn't exist. + // + // * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from + // a ByteMatchSet that doesn't exist. + ErrCodeWAFNonexistentContainerException = "WAFNonexistentContainerException" + + // ErrCodeWAFNonexistentItemException for service response error code + // "WAFNonexistentItemException". + // + // The operation failed because the referenced object doesn't exist. + ErrCodeWAFNonexistentItemException = "WAFNonexistentItemException" + + // ErrCodeWAFReferencedItemException for service response error code + // "WAFReferencedItemException". + // + // The operation failed because you tried to delete an object that is still + // in use. For example: + // + // * You tried to delete a ByteMatchSet that is still referenced by a Rule. + // + // * You tried to delete a Rule that is still referenced by a WebACL. + ErrCodeWAFReferencedItemException = "WAFReferencedItemException" + + // ErrCodeWAFStaleDataException for service response error code + // "WAFStaleDataException". + // + // The operation failed because you tried to create, update, or delete an object + // by using a change token that has already been used. + ErrCodeWAFStaleDataException = "WAFStaleDataException" + + // ErrCodeWAFUnavailableEntityException for service response error code + // "WAFUnavailableEntityException". + // + // The operation failed because the entity referenced is temporarily unavailable. + // Retry your request. + ErrCodeWAFUnavailableEntityException = "WAFUnavailableEntityException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go b/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go new file mode 100644 index 000000000..34865dddc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go @@ -0,0 +1,101 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package wafregional + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// This is the AWS WAF Regional API Reference for using AWS WAF with Elastic +// Load Balancing (ELB) Application Load Balancers. The AWS WAF actions and +// data types listed in the reference are available for protecting Application +// Load Balancers. You can use these actions and data types by means of the +// endpoints listed in AWS Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#waf_region). +// This guide is for developers who need detailed information about the AWS +// WAF API actions, data types, and errors. For detailed information about AWS +// WAF features and an overview of how to use the AWS WAF API, see the AWS WAF +// Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28 +type WAFRegional struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "waf-regional" // Service endpoint prefix API calls made to. + EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. +) + +// New creates a new instance of the WAFRegional client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a WAFRegional client from just a session. +// svc := wafregional.New(mySession) +// +// // Create a WAFRegional client with additional configuration +// svc := wafregional.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *WAFRegional { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WAFRegional { + svc := &WAFRegional{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2016-11-28", + JSONVersion: "1.1", + TargetPrefix: "AWSWAF_Regional_20161128", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a WAFRegional operation and runs any +// custom request initialization. 
+func (c *WAFRegional) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/vendor.json b/vendor/vendor.json index b7e96ab25..e5c2a1fc9 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1117,6 +1117,14 @@ "version": "v1.8.10", "versionExact": "v1.8.10" }, + { + "checksumSHA1": "QgNbH3Mxe4jiu3IN+vPAnz/IWbw=", + "path": "github.com/aws/aws-sdk-go/service/wafregional", + "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", + "revisionTime": "2017-04-06T18:01:00Z", + "version": "v1.8.10", + "versionExact": "v1.8.10" + }, { "checksumSHA1": "nqw2Qn5xUklssHTubS5HDvEL9L4=", "path": "github.com/bgentry/go-netrc/netrc", From e1ac462b3f11d382fbd270784654d1729512f984 Mon Sep 17 00:00:00 2001 From: okumin Date: Mon, 17 Apr 2017 01:53:19 +0900 Subject: [PATCH 150/342] Fix billing_acount into billing_account in google_project.html.markdown (#13689) --- .../source/docs/providers/google/r/google_project.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/google/r/google_project.html.markdown b/website/source/docs/providers/google/r/google_project.html.markdown index b90af0aa2..6fb0e7c7f 100755 --- a/website/source/docs/providers/google/r/google_project.html.markdown +++ b/website/source/docs/providers/google/r/google_project.html.markdown @@ -67,7 +67,7 @@ The following arguments are supported: This is required if you are creating a new project. Changing this forces a new project to be created. -* `billing_acount` - (Optional) The alphanumeric ID of the billing account this project +* `billing_account` - (Optional) The alphanumeric ID of the billing account this project belongs to. The user or service account performing this operation with Terraform must have Billing Account Administrator privileges (`roles/billing.admin`) in the organization. See [Google Cloud Billing API Access Control](https://cloud.google.com/billing/v1/how-tos/access-control) From f4b59f2d3aad0d273f1c858f70a2f8ccb208ecbd Mon Sep 17 00:00:00 2001 From: Charlie O'Leary Date: Sun, 16 Apr 2017 09:54:04 -0700 Subject: [PATCH 151/342] ip_set_descriptors is no longer required. (#13666) --- website/source/docs/providers/aws/r/waf_ipset.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/waf_ipset.html.markdown b/website/source/docs/providers/aws/r/waf_ipset.html.markdown index 428d25a7e..0fc43b395 100644 --- a/website/source/docs/providers/aws/r/waf_ipset.html.markdown +++ b/website/source/docs/providers/aws/r/waf_ipset.html.markdown @@ -28,7 +28,7 @@ resource "aws_waf_ipset" "ipset" { The following arguments are supported: * `name` - (Required) The name or description of the IPSet. -* `ip_set_descriptors` - (Required) The IP address type and IP address range (in CIDR notation) from which web requests originate. +* `ip_set_descriptors` - (Optional) The IP address type and IP address range (in CIDR notation) from which web requests originate. 
## Remarks From acb5684052b0aeef075bfcdfc6f7e5512164b199 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 17 Apr 2017 01:20:47 +0300 Subject: [PATCH 152/342] provider/aws: Documentation fixes for aws_api_gateway_* (#13693) Fixes: #13692 --- .../docs/providers/aws/r/api_gateway_usage_plan.html.markdown | 2 +- .../providers/aws/r/api_gateway_usage_plan_key.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/aws/r/api_gateway_usage_plan.html.markdown b/website/source/docs/providers/aws/r/api_gateway_usage_plan.html.markdown index 46fa85d8e..6a9a0800b 100644 --- a/website/source/docs/providers/aws/r/api_gateway_usage_plan.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_usage_plan.html.markdown @@ -6,7 +6,7 @@ description: |- Provides an API Gateway Usage Plan. --- -# aws\_api\_usage\_plan +# aws_api_gateway_usage_plan Provides an API Gateway Usage Plan. diff --git a/website/source/docs/providers/aws/r/api_gateway_usage_plan_key.html.markdown b/website/source/docs/providers/aws/r/api_gateway_usage_plan_key.html.markdown index 0a4293eea..6ec44fcf1 100644 --- a/website/source/docs/providers/aws/r/api_gateway_usage_plan_key.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_usage_plan_key.html.markdown @@ -6,7 +6,7 @@ description: |- Provides an API Gateway Usage Plan Key. --- -# aws\_api\_usage\_plan\_key +# aws_api_gateway_usage_plan_key Provides an API Gateway Usage Plan Key. From 4c25e1e269dbcaadef0d347335cdc701cb03abbb Mon Sep 17 00:00:00 2001 From: Kent Wang Date: Mon, 17 Apr 2017 06:26:06 +0800 Subject: [PATCH 153/342] provider/alicloud: Fix create PrePaid instance (#13661) (#13662) Fixes: #13661 --- .../alicloud/resource_alicloud_instance.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/builtin/providers/alicloud/resource_alicloud_instance.go b/builtin/providers/alicloud/resource_alicloud_instance.go index fe221f17b..b3cff7204 100644 --- a/builtin/providers/alicloud/resource_alicloud_instance.go +++ b/builtin/providers/alicloud/resource_alicloud_instance.go @@ -6,10 +6,11 @@ import ( "encoding/base64" "encoding/json" + "strings" + "github.com/denverdino/aliyungo/common" "github.com/denverdino/aliyungo/ecs" "github.com/hashicorp/terraform/helper/schema" - "strings" ) func resourceAliyunInstance() *schema.Resource { @@ -226,6 +227,12 @@ func resourceAliyunRunInstance(d *schema.ResourceData, meta interface{}) error { return err } + if args.IoOptimized == "optimized" { + args.IoOptimized = ecs.IoOptimized("true") + } else { + args.IoOptimized = ecs.IoOptimized("false") + } + runArgs, err := buildAliyunRunInstancesArgs(d, meta) if err != nil { return err @@ -578,11 +585,7 @@ func buildAliyunInstanceArgs(d *schema.ResourceData, meta interface{}) (*ecs.Cre } if v := d.Get("io_optimized").(string); v != "" { - if v == "optimized" { - args.IoOptimized = ecs.IoOptimized("true") - } else { - args.IoOptimized = ecs.IoOptimized("false") - } + args.IoOptimized = ecs.IoOptimized(v) } vswitchValue := d.Get("subnet_id").(string) From 16ec806634ce16da9b407002b30e6118640ef2f7 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 17 Apr 2017 01:26:40 +0300 Subject: [PATCH 154/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 53c4c90a6..5f21d2c84 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ BUG FIXES: * core: Add the close provider/provisioner transformers back [GH-13102] * core: Fix 
a crash condition by improving the flatmap.Expand() logic [GH-13541] + * provider/alicloud: Fix create PrePaid instance (#13661) [GH-13662] * provider/aws: Fix DB Parameter Group Name [GH-13279] * provider/aws: Increase default number of retries from 11 to 25 [GH-13673] * provider/aws: Use mutex & retry for WAF change operations [GH-13656] From b348cff246509cfde1d1b71a512694f8ffdc7647 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 17 Apr 2017 01:26:57 +0300 Subject: [PATCH 155/342] provider/aws: Documentation fixes for aws_inspector_resource_group (#13695) Fixes: #13684 --- .../docs/providers/aws/r/inspector_resource_group.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/inspector_resource_group.html.markdown b/website/source/docs/providers/aws/r/inspector_resource_group.html.markdown index 42881aede..362754d59 100644 --- a/website/source/docs/providers/aws/r/inspector_resource_group.html.markdown +++ b/website/source/docs/providers/aws/r/inspector_resource_group.html.markdown @@ -8,7 +8,7 @@ description: |- # aws\_inspector\_resource\_group -Provides a Inspector assessment template +Provides a Inspector resource group ## Example Usage From 5c3438e394c3acab23fd063e6301c69586aa2d07 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 16 Apr 2017 18:31:43 -0400 Subject: [PATCH 156/342] Remove aws_vpc_dhcp_options if not found. (#13610) --- .../aws/resource_aws_vpc_dhcp_options.go | 13 +++++- .../aws/resource_aws_vpc_dhcp_options_test.go | 40 +++++++++++++++++++ 2 files changed, 52 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_vpc_dhcp_options.go b/builtin/providers/aws/resource_aws_vpc_dhcp_options.go index 16c33fd4d..66de6dbeb 100644 --- a/builtin/providers/aws/resource_aws_vpc_dhcp_options.go +++ b/builtin/providers/aws/resource_aws_vpc_dhcp_options.go @@ -147,7 +147,18 @@ func resourceAwsVpcDhcpOptionsRead(d *schema.ResourceData, meta interface{}) err resp, err := conn.DescribeDhcpOptions(req) if err != nil { - return fmt.Errorf("Error retrieving DHCP Options: %s", err) + ec2err, ok := err.(awserr.Error) + if !ok { + return fmt.Errorf("Error retrieving DHCP Options: %s", err.Error()) + } + + if ec2err.Code() == "InvalidDhcpOptionID.NotFound" { + log.Printf("[WARN] DHCP Options (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving DHCP Options: %s", err.Error()) } if len(resp.DhcpOptions) == 0 { diff --git a/builtin/providers/aws/resource_aws_vpc_dhcp_options_test.go b/builtin/providers/aws/resource_aws_vpc_dhcp_options_test.go index baa86f7d7..f101f95f3 100644 --- a/builtin/providers/aws/resource_aws_vpc_dhcp_options_test.go +++ b/builtin/providers/aws/resource_aws_vpc_dhcp_options_test.go @@ -36,6 +36,26 @@ func TestAccAWSDHCPOptions_basic(t *testing.T) { }) } +func TestAccAWSDHCPOptions_deleteOptions(t *testing.T) { + var d ec2.DhcpOptions + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDHCPOptionsDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDHCPOptionsConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckDHCPOptionsExists("aws_vpc_dhcp_options.foo", &d), + testAccCheckDHCPOptionsDelete("aws_vpc_dhcp_options.foo"), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckDHCPOptionsDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ec2conn @@ -104,6 
+124,26 @@ func testAccCheckDHCPOptionsExists(n string, d *ec2.DhcpOptions) resource.TestCh } } +func testAccCheckDHCPOptionsDelete(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + _, err := conn.DeleteDhcpOptions(&ec2.DeleteDhcpOptionsInput{ + DhcpOptionsId: aws.String(rs.Primary.ID), + }) + + return err + } +} + const testAccDHCPOptionsConfig = ` resource "aws_vpc_dhcp_options" "foo" { domain_name = "service.consul" From 37fe35932ef3ce36a684900d8996feefd8474bc5 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 17 Apr 2017 01:32:14 +0300 Subject: [PATCH 157/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f21d2c84..c9cde6a19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ BUG FIXES: * provider/aws: Fix DB Parameter Group Name [GH-13279] * provider/aws: Increase default number of retries from 11 to 25 [GH-13673] * provider/aws: Use mutex & retry for WAF change operations [GH-13656] + * provider/aws: Remove aws_vpc_dhcp_options if not found [GH-13610] * provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650] * provider/google: Stop setting the id when project creation fails [GH-13644] * provider/openstack: Fix updating Ports [GH-13604] From 67f02e6a547dfbff00954872bf3db255e90ff686 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 17 Apr 2017 01:33:30 +0300 Subject: [PATCH 158/342] provider/aws: Documentation fixes for aws_ami (#13694) * provider/aws: Documentation fixes for aws_ami Fixes: #13685 * Update ami.html.markdown --- website/source/docs/providers/aws/r/ami.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/aws/r/ami.html.markdown b/website/source/docs/providers/aws/r/ami.html.markdown index 505cece26..b275289d2 100644 --- a/website/source/docs/providers/aws/r/ami.html.markdown +++ b/website/source/docs/providers/aws/r/ami.html.markdown @@ -81,12 +81,12 @@ Nested `ebs_block_device` blocks have the following structure: as the selected snapshot. * `volume_type` - (Optional) The type of EBS volume to create. Can be one of "standard" (the default), "io1" or "gp2". -* `encrypted` - (Optional) Specifies whether the destination snapshots of the copied image should be encrypted. -The default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. * `kms_key_id` - (Optional) The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when encrypting the snapshots of an image during a copy operation. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used +~> **Note:** You can specify `encrypted` or `snapshot_id` but not both. + Nested `ephemeral_block_device` blocks have the following structure: * `device_name` - (Required) The path at which the device is exposed to created instances. 
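A configuration-level illustration of the `ebs_block_device` note added in the patch above. This is a minimal, hypothetical sketch — the resource name, snapshot ID, and volume size are placeholders, not values from any patch in this series. Because `snapshot_id` and `encrypted` cannot be combined, a snapshot-backed block device simply leaves `encrypted` unset; the snapshot itself determines the encryption state of the resulting volume.

```
resource "aws_ami" "example" {
  name                = "example-ami"
  virtualization_type = "hvm"
  root_device_name    = "/dev/xvda"

  ebs_block_device {
    device_name = "/dev/xvda"
    # Per the note above, `snapshot_id` and `encrypted` are mutually
    # exclusive, so `encrypted` is omitted here.
    snapshot_id = "snap-0123456789abcdef0"
    volume_size = 8
  }
}
```

When the encryption state does need to change, copying the image (see the `aws_ami_copy` resource) is the usual route rather than setting `encrypted` alongside a snapshot.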
From 1af649ed5a25eb8da23ea04641fb9714bd631a33 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sun, 16 Apr 2017 16:36:15 -0600 Subject: [PATCH 159/342] provider/openstack: Ignore fixed_ip when importing ports (#13563) --- .../openstack/import_openstack_networking_port_v2_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/builtin/providers/openstack/import_openstack_networking_port_v2_test.go b/builtin/providers/openstack/import_openstack_networking_port_v2_test.go index d7453f874..562e3e0cd 100644 --- a/builtin/providers/openstack/import_openstack_networking_port_v2_test.go +++ b/builtin/providers/openstack/import_openstack_networking_port_v2_test.go @@ -22,6 +22,9 @@ func TestAccNetworkingV2Port_importBasic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "fixed_ip", + }, }, }, }) From f7f800bdfbee6e39285f7d142cc721444023c255 Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Sun, 16 Apr 2017 23:37:28 +0100 Subject: [PATCH 160/342] provider/azurerm: VM Scale Sets - import support + fixes (#13464) * Ensuring we base64 decode the custom data if it's base64 encoded * Import support for VM Scale Sets * Updating the docs to mention Import support * Fixes #13009, where the SSH Keys would be set at the incorrect index (leaving a null entry at the start, causing a crash on the second apply) * Adding tests to cover the updating use-case * Adding an import linux test * Storing the base64 encoded value Making custom_data a force new, since it an't be updated * Updating the docs --- ...port_arm_virtual_machine_scale_set_test.go | 135 +++ builtin/providers/azurerm/provider.go | 2 +- .../resource_arm_virtual_machine_scale_set.go | 39 +- ...urce_arm_virtual_machine_scale_set_test.go | 871 ++++++++++++------ .../virtual_machine_scale_sets.html.markdown | 13 +- 5 files changed, 747 insertions(+), 313 deletions(-) create mode 100644 builtin/providers/azurerm/import_arm_virtual_machine_scale_set_test.go diff --git a/builtin/providers/azurerm/import_arm_virtual_machine_scale_set_test.go b/builtin/providers/azurerm/import_arm_virtual_machine_scale_set_test.go new file mode 100644 index 000000000..4bd836a78 --- /dev/null +++ b/builtin/providers/azurerm/import_arm_virtual_machine_scale_set_test.go @@ -0,0 +1,135 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAzureRMVirtualMachineScaleSet_importBasic(t *testing.T) { + resourceName := "azurerm_virtual_machine_scale_set.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basic, ri, ri, ri, ri, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMVirtualMachineScaleSet_importLinux(t *testing.T) { + resourceName := "azurerm_virtual_machine_scale_set.test" + + ri := acctest.RandInt() + config := testAccAzureRMVirtualMachineScaleSet_linux(ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, 
+ ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMVirtualMachineScaleSet_importLoadBalancer(t *testing.T) { + resourceName := "azurerm_virtual_machine_scale_set.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetLoadbalancerTemplate, ri, ri, ri, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, + Steps: []resource.TestStep{ + { + Config: config, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMVirtualMachineScaleSet_importOverProvision(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetOverprovisionTemplate, ri, ri, ri, ri, ri, ri) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), + testCheckAzureRMVirtualMachineScaleSetOverprovision("azurerm_virtual_machine_scale_set.test"), + ), + }, + }, + }) +} + +func TestAccAzureRMVirtualMachineScaleSet_importExtension(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetExtensionTemplate, ri, ri, ri, ri, ri, ri) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), + testCheckAzureRMVirtualMachineScaleSetExtension("azurerm_virtual_machine_scale_set.test"), + ), + }, + }, + }) +} + +func TestAccAzureRMVirtualMachineScaleSet_importMultipleExtensions(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetMultipleExtensionsTemplate, ri, ri, ri, ri, ri, ri) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), + testCheckAzureRMVirtualMachineScaleSetExtension("azurerm_virtual_machine_scale_set.test"), + ), + }, + }, + }) +} diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go index c57aae2c2..3ff68a83c 100644 --- a/builtin/providers/azurerm/provider.go +++ b/builtin/providers/azurerm/provider.go @@ -346,7 +346,7 @@ func userDataStateFunc(v interface{}) string { } } -// Base64Encode encodes data if the input isn't already encoded using +// base64Encode encodes data if the input isn't already encoded using // base64.StdEncoding.EncodeToString. If the input is already base64 encoded, // return the original input unchanged. 
func base64Encode(data string) string { diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set.go b/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set.go index ae240938a..a396833cc 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set.go @@ -19,6 +19,9 @@ func resourceArmVirtualMachineScaleSet() *schema.Resource { Read: resourceArmVirtualMachineScaleSetRead, Update: resourceArmVirtualMachineScaleSetCreate, Delete: resourceArmVirtualMachineScaleSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -96,6 +99,7 @@ func resourceArmVirtualMachineScaleSet() *schema.Resource { "custom_data": { Type: schema.TypeString, Optional: true, + ForceNew: true, StateFunc: userDataStateFunc, }, }, @@ -497,8 +501,9 @@ func resourceArmVirtualMachineScaleSetRead(d *schema.ResourceData, meta interfac return fmt.Errorf("Error making Read request on Azure Virtual Machine Scale Set %s: %s", name, err) } - d.Set("location", resp.Location) d.Set("name", resp.Name) + d.Set("resource_group_name", resGroup) + d.Set("location", azureRMNormalizeLocation(*resp.Location)) if err := d.Set("sku", flattenAzureRmVirtualMachineScaleSetSku(resp.Sku)); err != nil { return fmt.Errorf("[DEBUG] Error setting Virtual Machine Scale Set Sku error: %#v", err) @@ -509,7 +514,12 @@ func resourceArmVirtualMachineScaleSetRead(d *schema.ResourceData, meta interfac d.Set("upgrade_policy_mode", properties.UpgradePolicy.Mode) d.Set("overprovision", properties.Overprovision) - if err := d.Set("os_profile", flattenAzureRMVirtualMachineScaleSetOsProfile(properties.VirtualMachineProfile.OsProfile)); err != nil { + osProfile, err := flattenAzureRMVirtualMachineScaleSetOsProfile(properties.VirtualMachineProfile.OsProfile) + if err != nil { + return fmt.Errorf("[DEBUG] Error flattening Virtual Machine Scale Set OS Profile. 
Error: %#v", err) + } + + if err := d.Set("os_profile", osProfile); err != nil { return fmt.Errorf("[DEBUG] Error setting Virtual Machine Scale Set OS Profile error: %#v", err) } @@ -578,7 +588,7 @@ func flattenAzureRmVirtualMachineScaleSetOsProfileLinuxConfig(config *compute.Li result["disable_password_authentication"] = *config.DisablePasswordAuthentication if config.SSH != nil && len(*config.SSH.PublicKeys) > 0 { - ssh_keys := make([]map[string]interface{}, len(*config.SSH.PublicKeys)) + ssh_keys := make([]map[string]interface{}, 0, len(*config.SSH.PublicKeys)) for _, i := range *config.SSH.PublicKeys { key := make(map[string]interface{}) key["path"] = *i.Path @@ -710,7 +720,7 @@ func flattenAzureRmVirtualMachineScaleSetNetworkProfile(profile *compute.Virtual return result } -func flattenAzureRMVirtualMachineScaleSetOsProfile(profile *compute.VirtualMachineScaleSetOSProfile) []interface{} { +func flattenAzureRMVirtualMachineScaleSetOsProfile(profile *compute.VirtualMachineScaleSetOSProfile) ([]interface{}, error) { result := make(map[string]interface{}) result["computer_name_prefix"] = *profile.ComputerNamePrefix @@ -720,7 +730,7 @@ func flattenAzureRMVirtualMachineScaleSetOsProfile(profile *compute.VirtualMachi result["custom_data"] = *profile.CustomData } - return []interface{}{result} + return []interface{}{result}, nil } func flattenAzureRmVirtualMachineScaleSetStorageProfileOSDisk(profile *compute.VirtualMachineScaleSetOSDisk) []interface{} { @@ -849,7 +859,11 @@ func resourceArmVirtualMachineScaleSetsOsProfileHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", m["computer_name_prefix"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["admin_username"].(string))) if m["custom_data"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["custom_data"].(string))) + customData := m["custom_data"].(string) + if !isBase64Encoded(customData) { + customData = base64Encode(customData) + } + buf.WriteString(fmt.Sprintf("%s-", customData)) } return hashcode.String(buf.String()) } @@ -1076,12 +1090,12 @@ func expandAzureRmVirtualMachineScaleSetOsProfileLinuxConfig(d *schema.ResourceD linuxConfig := osProfilesLinuxConfig[0].(map[string]interface{}) disablePasswordAuth := linuxConfig["disable_password_authentication"].(bool) - config := &compute.LinuxConfiguration{ - DisablePasswordAuthentication: &disablePasswordAuth, - } linuxKeys := linuxConfig["ssh_keys"].([]interface{}) sshPublicKeys := make([]compute.SSHPublicKey, 0, len(linuxKeys)) for _, key := range linuxKeys { + if key == nil { + continue + } sshKey := key.(map[string]interface{}) path := sshKey["path"].(string) keyData := sshKey["key_data"].(string) @@ -1094,8 +1108,11 @@ func expandAzureRmVirtualMachineScaleSetOsProfileLinuxConfig(d *schema.ResourceD sshPublicKeys = append(sshPublicKeys, sshPublicKey) } - config.SSH = &compute.SSHConfiguration{ - PublicKeys: &sshPublicKeys, + config := &compute.LinuxConfiguration{ + DisablePasswordAuthentication: &disablePasswordAuth, + SSH: &compute.SSHConfiguration{ + PublicKeys: &sshPublicKeys, + }, } return config, nil diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set_test.go b/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set_test.go index e9f3d1ef9..bd7e29bb0 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set_test.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set_test.go @@ -10,9 +10,9 @@ import ( "github.com/hashicorp/terraform/terraform" ) -func TestAccAzureRMVirtualMachineScaleSet_basicLinux(t 
*testing.T) { +func TestAccAzureRMVirtualMachineScaleSet_basic(t *testing.T) { ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basicLinux, ri, ri, ri, ri, ri, ri, ri, ri) + config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basic, ri, ri, ri, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -28,9 +28,36 @@ func TestAccAzureRMVirtualMachineScaleSet_basicLinux(t *testing.T) { }) } +func TestAccAzureRMVirtualMachineScaleSet_linuxUpdated(t *testing.T) { + resourceName := "azurerm_virtual_machine_scale_set.test" + ri := acctest.RandInt() + config := testAccAzureRMVirtualMachineScaleSet_linux(ri) + updatedConfig := testAccAzureRMVirtualMachineScaleSet_linuxUpdated(ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists(resourceName), + ), + }, + { + Config: updatedConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists(resourceName), + ), + }, + }, + }) +} + func TestAccAzureRMVirtualMachineScaleSet_basicLinux_disappears(t *testing.T) { ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basicLinux, ri, ri, ri, ri, ri, ri, ri, ri) + config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basic, ri, ri, ri, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -311,87 +338,88 @@ func testCheckAzureRMVirtualMachineScaleSetExtension(name string) resource.TestC } } -var testAccAzureRMVirtualMachineScaleSet_basicLinux = ` +var testAccAzureRMVirtualMachineScaleSet_basic = ` resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" + name = "acctestRG-%d" + location = "West US" } resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" } resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" + name = "acctsub-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" } resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" + name = "acctni-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "dynamic" + } } resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" + name = "accsa%d" + resource_group_name = 
"${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" - tags { - environment = "staging" - } + tags { + environment = "staging" + } } resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" + name = "vhds" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" } resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctvmss-%d" - location = "West US" + name = "acctvmss-%d" + location = "West US" resource_group_name = "${azurerm_resource_group.test.name}" upgrade_policy_mode = "Manual" sku { - name = "Standard_A0" - tier = "Standard" + name = "Standard_A0" + tier = "Standard" capacity = 2 } os_profile { computer_name_prefix = "testvm-%d" - admin_username = "myadmin" - admin_password = "Passwword1234" + admin_username = "myadmin" + admin_password = "Passwword1234" } network_profile { - name = "TestNetworkProfile-%d" - primary = true - ip_configuration { - name = "TestIPConfiguration" - subnet_id = "${azurerm_subnet.test.id}" - } + name = "TestNetworkProfile-%d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + subnet_id = "${azurerm_subnet.test.id}" + } } storage_profile_os_disk { - name = "osDiskProfile" - caching = "ReadWrite" - create_option = "FromImage" + name = "osDiskProfile" + caching = "ReadWrite" + create_option = "FromImage" vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] } @@ -404,381 +432,626 @@ resource "azurerm_virtual_machine_scale_set" "test" { } ` -var testAccAzureRMVirtualMachineScaleSetLoadbalancerTemplate = ` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "southcentralus" +func testAccAzureRMVirtualMachineScaleSet_linux(rInt int) string { + return fmt.Sprintf(` + resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West Europe" } resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" + name = "acctestvn-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + address_space = ["10.0.0.0/8"] } resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" + name = "acctestsn-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.1.0/24" } resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "southcentralus" - account_type = "Standard_LRS" + name = "accsa%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_type = "Standard_LRS" } resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" + name = "acctestsc-%d" + resource_group_name = 
"${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" +} + +resource "azurerm_public_ip" "test" { + name = "acctestpip-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + public_ip_address_allocation = "static" } resource "azurerm_lb" "test" { - name = "acctestlb-%d" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" + name = "acctestlb-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" - frontend_ip_configuration { - name = "default" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "Dynamic" - } + frontend_ip_configuration { + name = "ip-address" + public_ip_address_id = "${azurerm_public_ip.test.id}" + } } resource "azurerm_lb_backend_address_pool" "test" { - name = "test" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "southcentralus" - loadbalancer_id = "${azurerm_lb.test.id}" + name = "acctestbap-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" } resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctvmss-%d" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" - upgrade_policy_mode = "Manual" + name = "acctestvmss-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + upgrade_policy_mode = "Automatic" - sku { - name = "Standard_A0" - tier = "Standard" - capacity = 1 - } + sku { + name = "Standard_A0" + tier = "Standard" + capacity = "1" + } - os_profile { - computer_name_prefix = "testvm-%d" - admin_username = "myadmin" - admin_password = "Passwword1234" - } + os_profile { + computer_name_prefix = "prefix" + admin_username = "ubuntu" + admin_password = "password" + custom_data = "custom data!" 
+ } - network_profile { - name = "TestNetworkProfile" - primary = true - ip_configuration { - name = "TestIPConfiguration" - subnet_id = "${azurerm_subnet.test.id}" - load_balancer_backend_address_pool_ids = [ "${azurerm_lb_backend_address_pool.test.id}" ] - } - } + os_profile_linux_config { + disable_password_authentication = true - storage_profile_os_disk { - name = "os-disk" - caching = "ReadWrite" - create_option = "FromImage" - vhd_containers = [ "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}" ] - } + ssh_keys { + path = "/home/ubuntu/.ssh/authorized_keys" + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCsTcryUl51Q2VSEHqDRNmceUFo55ZtcIwxl2QITbN1RREti5ml/VTytC0yeBOvnZA4x4CFpdw/lCDPk0yrH9Ei5vVkXmOrExdTlT3qI7YaAzj1tUVlBd4S6LX1F7y6VLActvdHuDDuXZXzCDd/97420jrDfWZqJMlUK/EmCE5ParCeHIRIvmBxcEnGfFIsw8xQZl0HphxWOtJil8qsUWSdMyCiJYYQpMoMliO99X40AUc4/AlsyPyT5ddbKk08YrZ+rKDVHF7o29rh4vi5MmHkVgVQHKiKybWlHq+b71gIAUQk9wrJxD+dqt4igrmDSpIjfjwnd+l5UIn5fJSO5DYV4YT/4hwK7OKmuo7OFHD0WyY5YnkYEMtFgzemnRBdE8ulcT60DQpVgRMXFWHvhyCWy0L6sgj1QWDZlLpvsIvNfHsyhKFMG1frLnMt/nP0+YCcfg+v1JYeCKjeoJxB8DWcRBsjzItY0CGmzP8UYZiYKl/2u+2TgFS5r7NWH11bxoUzjKdaa1NLw+ieA8GlBFfCbfWe6YVB9ggUte4VtYFMZGxOjS2bAiYtfgTKFJv+XqORAwExG6+G2eDxIDyo80/OA9IG7Xv/jwQr7D6KDjDuULFcN/iTxuttoKrHeYz1hf5ZQlBdllwJHYx6fK2g8kha6r2JIQKocvsAXiiONqSfw== hello@world.com" + } + } - storage_profile_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } + network_profile { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + subnet_id = "${azurerm_subnet.test.id}" + load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.test.id}"] + } + } + + storage_profile_os_disk { + name = "osDiskProfile" + caching = "ReadWrite" + create_option = "FromImage" + os_type = "linux" + vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] + } + + storage_profile_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "14.04.2-LTS" + version = "latest" + } +} + +`, rInt, rInt, rInt, rInt, rInt, rInt, rInt, rInt, rInt) +} + +func testAccAzureRMVirtualMachineScaleSet_linuxUpdated(rInt int) string { + return fmt.Sprintf(` + resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West Europe" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvn-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + address_space = ["10.0.0.0/8"] +} + +resource "azurerm_subnet" "test" { + name = "acctestsn-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.1.0/24" +} + +resource "azurerm_storage_account" "test" { + name = "accsa%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_type = "Standard_LRS" +} + +resource "azurerm_storage_container" "test" { + name = "acctestsc-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" +} + +resource "azurerm_public_ip" "test" { + name = "acctestpip-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + public_ip_address_allocation = "static" +} 
+ +resource "azurerm_lb" "test" { + name = "acctestlb-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + frontend_ip_configuration { + name = "ip-address" + public_ip_address_id = "${azurerm_public_ip.test.id}" + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + name = "acctestbap-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" +} + +resource "azurerm_virtual_machine_scale_set" "test" { + name = "acctestvmss-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + upgrade_policy_mode = "Automatic" + + sku { + name = "Standard_A0" + tier = "Standard" + capacity = "1" + } + + os_profile { + computer_name_prefix = "prefix" + admin_username = "ubuntu" + admin_password = "password" + custom_data = "custom data!" + } + + os_profile_linux_config { + disable_password_authentication = true + + ssh_keys { + path = "/home/ubuntu/.ssh/authorized_keys" + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCsTcryUl51Q2VSEHqDRNmceUFo55ZtcIwxl2QITbN1RREti5ml/VTytC0yeBOvnZA4x4CFpdw/lCDPk0yrH9Ei5vVkXmOrExdTlT3qI7YaAzj1tUVlBd4S6LX1F7y6VLActvdHuDDuXZXzCDd/97420jrDfWZqJMlUK/EmCE5ParCeHIRIvmBxcEnGfFIsw8xQZl0HphxWOtJil8qsUWSdMyCiJYYQpMoMliO99X40AUc4/AlsyPyT5ddbKk08YrZ+rKDVHF7o29rh4vi5MmHkVgVQHKiKybWlHq+b71gIAUQk9wrJxD+dqt4igrmDSpIjfjwnd+l5UIn5fJSO5DYV4YT/4hwK7OKmuo7OFHD0WyY5YnkYEMtFgzemnRBdE8ulcT60DQpVgRMXFWHvhyCWy0L6sgj1QWDZlLpvsIvNfHsyhKFMG1frLnMt/nP0+YCcfg+v1JYeCKjeoJxB8DWcRBsjzItY0CGmzP8UYZiYKl/2u+2TgFS5r7NWH11bxoUzjKdaa1NLw+ieA8GlBFfCbfWe6YVB9ggUte4VtYFMZGxOjS2bAiYtfgTKFJv+XqORAwExG6+G2eDxIDyo80/OA9IG7Xv/jwQr7D6KDjDuULFcN/iTxuttoKrHeYz1hf5ZQlBdllwJHYx6fK2g8kha6r2JIQKocvsAXiiONqSfw== hello@world.com" + } + } + + network_profile { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + subnet_id = "${azurerm_subnet.test.id}" + load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.test.id}"] + } + } + + storage_profile_os_disk { + name = "osDiskProfile" + caching = "ReadWrite" + create_option = "FromImage" + os_type = "linux" + vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] + } + + storage_profile_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "14.04.2-LTS" + version = "latest" + } + + tags { + ThisIs = "a test" + } +} + +`, rInt, rInt, rInt, rInt, rInt, rInt, rInt, rInt, rInt) +} + +var testAccAzureRMVirtualMachineScaleSetLoadbalancerTemplate = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "southcentralus" +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "southcentralus" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_subnet" "test" { + name = "acctsub-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" +} + +resource "azurerm_storage_account" "test" { + name = "accsa%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "southcentralus" + account_type = "Standard_LRS" +} + +resource "azurerm_storage_container" "test" { + name = "vhds" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + 
container_access_type = "private" +} + +resource "azurerm_lb" "test" { + name = "acctestlb-%d" + location = "southcentralus" + resource_group_name = "${azurerm_resource_group.test.name}" + + frontend_ip_configuration { + name = "default" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "Dynamic" + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + name = "test" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "southcentralus" + loadbalancer_id = "${azurerm_lb.test.id}" +} + +resource "azurerm_virtual_machine_scale_set" "test" { + name = "acctvmss-%d" + location = "southcentralus" + resource_group_name = "${azurerm_resource_group.test.name}" + upgrade_policy_mode = "Manual" + + sku { + name = "Standard_A0" + tier = "Standard" + capacity = 1 + } + + os_profile { + computer_name_prefix = "testvm-%d" + admin_username = "myadmin" + admin_password = "Passwword1234" + } + + network_profile { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + subnet_id = "${azurerm_subnet.test.id}" + load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.test.id}"] + } + } + + storage_profile_os_disk { + name = "os-disk" + caching = "ReadWrite" + create_option = "FromImage" + vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] + } + + storage_profile_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "14.04.2-LTS" + version = "latest" + } } ` var testAccAzureRMVirtualMachineScaleSetOverprovisionTemplate = ` resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "southcentralus" + name = "acctestrg-%d" + location = "southcentralus" } resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "southcentralus" + resource_group_name = "${azurerm_resource_group.test.name}" } resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" + name = "acctsub-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" } resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "southcentralus" - account_type = "Standard_LRS" + name = "accsa%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "southcentralus" + account_type = "Standard_LRS" } resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" + name = "vhds" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" } resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctvmss-%d" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" - upgrade_policy_mode = "Manual" - overprovision = false + name = "acctvmss-%d" + location = "southcentralus" + 
resource_group_name = "${azurerm_resource_group.test.name}" + upgrade_policy_mode = "Manual" + overprovision = false - sku { - name = "Standard_A0" - tier = "Standard" - capacity = 1 - } + sku { + name = "Standard_A0" + tier = "Standard" + capacity = 1 + } - os_profile { - computer_name_prefix = "testvm-%d" - admin_username = "myadmin" - admin_password = "Passwword1234" - } + os_profile { + computer_name_prefix = "testvm-%d" + admin_username = "myadmin" + admin_password = "Passwword1234" + } - network_profile { - name = "TestNetworkProfile" - primary = true - ip_configuration { - name = "TestIPConfiguration" - subnet_id = "${azurerm_subnet.test.id}" - } - } + network_profile { + name = "TestNetworkProfile" + primary = true - storage_profile_os_disk { - name = "os-disk" - caching = "ReadWrite" - create_option = "FromImage" - vhd_containers = [ "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}" ] - } + ip_configuration { + name = "TestIPConfiguration" + subnet_id = "${azurerm_subnet.test.id}" + } + } - storage_profile_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } + storage_profile_os_disk { + name = "os-disk" + caching = "ReadWrite" + create_option = "FromImage" + vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] + } + + storage_profile_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "14.04.2-LTS" + version = "latest" + } } ` var testAccAzureRMVirtualMachineScaleSetExtensionTemplate = ` resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "southcentralus" + name = "acctestrg-%d" + location = "southcentralus" } resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "southcentralus" + resource_group_name = "${azurerm_resource_group.test.name}" } resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" + name = "acctsub-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" } resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "southcentralus" - account_type = "Standard_LRS" + name = "accsa%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "southcentralus" + account_type = "Standard_LRS" } resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" + name = "vhds" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" } resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctvmss-%d" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" - upgrade_policy_mode = "Manual" - overprovision = false + name = "acctvmss-%d" + location = "southcentralus" + resource_group_name = 
"${azurerm_resource_group.test.name}" + upgrade_policy_mode = "Manual" + overprovision = false - sku { - name = "Standard_A0" - tier = "Standard" - capacity = 1 - } + sku { + name = "Standard_A0" + tier = "Standard" + capacity = 1 + } - os_profile { - computer_name_prefix = "testvm-%d" - admin_username = "myadmin" - admin_password = "Passwword1234" - } + os_profile { + computer_name_prefix = "testvm-%d" + admin_username = "myadmin" + admin_password = "Passwword1234" + } - network_profile { - name = "TestNetworkProfile" - primary = true - ip_configuration { - name = "TestIPConfiguration" - subnet_id = "${azurerm_subnet.test.id}" - } - } + network_profile { + name = "TestNetworkProfile" + primary = true - storage_profile_os_disk { - name = "os-disk" - caching = "ReadWrite" - create_option = "FromImage" - vhd_containers = [ "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}" ] - } + ip_configuration { + name = "TestIPConfiguration" + subnet_id = "${azurerm_subnet.test.id}" + } + } - storage_profile_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } + storage_profile_os_disk { + name = "os-disk" + caching = "ReadWrite" + create_option = "FromImage" + vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] + } - extension { - name = "CustomScript" - publisher = "Microsoft.Azure.Extensions" - type = "CustomScript" - type_handler_version = "2.0" - auto_upgrade_minor_version = true - settings = < **Note:** Please note that the only allowed `path` is `/home//.ssh/authorized_keys` due to a limitation of Azure_ +~> _**Note:** Please note that the only allowed `path` is `/home//.ssh/authorized_keys` due to a limitation of Azure_ `network_profile` supports the following: @@ -225,3 +225,12 @@ The following arguments are supported: The following attributes are exported: * `id` - The virtual machine scale set ID. + + +## Import + +Virtual Machine Scale Sets can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_virtual_machine_scale_set.scaleset1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Compute/virtualMachineScaleSets/scaleset1 +``` From ce09cc1bb9407429b82c7e9f2feecafbb0b3016e Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 17 Apr 2017 01:38:08 +0300 Subject: [PATCH 161/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c9cde6a19..0d2921c9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ FEATURES: IMPROVEMENTS: * state/remote/swift: Support Openstack request logging [GH-13583] * provider/aws: Add an option to skip getting the supported EC2 platforms [GH-13672] + * provider/azurerm: VM Scale Sets - import support [GH-13464] * provider/google: `google_compute_address` and `google_compute_global_address` are now importable [GH-13270] BUG FIXES: From da7041f4bedd1ce2bcc61433321f5a4f38b40fd8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 16 Apr 2017 18:39:42 -0400 Subject: [PATCH 162/342] Remove aws_network_acl_rule if not found. 
(#13608) --- .../aws/resource_aws_network_acl_rule.go | 17 +++++- .../aws/resource_aws_network_acl_rule_test.go | 56 ++++++++++++++++++- 2 files changed, 70 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_network_acl_rule.go b/builtin/providers/aws/resource_aws_network_acl_rule.go index 5cce925c5..d3aa099fc 100644 --- a/builtin/providers/aws/resource_aws_network_acl_rule.go +++ b/builtin/providers/aws/resource_aws_network_acl_rule.go @@ -163,10 +163,14 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e // API (see issue GH-4721). Retry the `findNetworkAclRule` function until it is // visible (which in most cases is likely immediately). err = resource.Retry(3*time.Minute, func() *resource.RetryError { - _, findErr := findNetworkAclRule(d, meta) + r, findErr := findNetworkAclRule(d, meta) if findErr != nil { return resource.RetryableError(findErr) } + if r == nil { + err := fmt.Errorf("Network ACL rule (%s) not found", d.Id()) + return resource.RetryableError(err) + } return nil }) @@ -182,6 +186,11 @@ func resourceAwsNetworkAclRuleRead(d *schema.ResourceData, meta interface{}) err if err != nil { return err } + if resp == nil { + log.Printf("[DEBUG] Network ACL rule (%s) not found", d.Id()) + d.SetId("") + return nil + } d.Set("rule_number", resp.RuleNumber) d.Set("cidr_block", resp.CidrBlock) @@ -257,7 +266,11 @@ func findNetworkAclRule(d *schema.ResourceData, meta interface{}) (*ec2.NetworkA return nil, fmt.Errorf("Error Finding Network Acl Rule %d: %s", d.Get("rule_number").(int), err.Error()) } - if resp == nil || len(resp.NetworkAcls) != 1 || resp.NetworkAcls[0] == nil { + if resp == nil || len(resp.NetworkAcls) == 0 || resp.NetworkAcls[0] == nil { + // Missing NACL rule. + return nil, nil + } + if len(resp.NetworkAcls) > 1 { return nil, fmt.Errorf( "Expected to find one Network ACL, got: %#v", resp.NetworkAcls) diff --git a/builtin/providers/aws/resource_aws_network_acl_rule_test.go b/builtin/providers/aws/resource_aws_network_acl_rule_test.go index f9ab943a8..19b34cef7 100644 --- a/builtin/providers/aws/resource_aws_network_acl_rule_test.go +++ b/builtin/providers/aws/resource_aws_network_acl_rule_test.go @@ -137,6 +137,26 @@ func TestResourceAWSNetworkAclRule_validateICMPArgumentValue(t *testing.T) { } +func TestAccAWSNetworkAclRule_deleteRule(t *testing.T) { + var networkAcl ec2.NetworkAcl + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSNetworkAclRuleBasicConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl), + testAccCheckAWSNetworkAclRuleDelete("aws_network_acl_rule.baz"), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckAWSNetworkAclRuleDestroy(s *terraform.State) error { for _, rs := range s.RootModule().Resources { @@ -179,7 +199,7 @@ func testAccCheckAWSNetworkAclRuleExists(n string, networkAcl *ec2.NetworkAcl) r } if rs.Primary.ID == "" { - return fmt.Errorf("No Network ACL Id is set") + return fmt.Errorf("No Network ACL Rule Id is set") } req := &ec2.DescribeNetworkAclsInput{ @@ -209,6 +229,40 @@ func testAccCheckAWSNetworkAclRuleExists(n string, networkAcl *ec2.NetworkAcl) r } } +func testAccCheckAWSNetworkAclRuleDelete(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + 
return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Network ACL Rule Id is set") + } + + egress, err := strconv.ParseBool(rs.Primary.Attributes["egress"]) + if err != nil { + return err + } + ruleNo, err := strconv.ParseInt(rs.Primary.Attributes["rule_number"], 10, 64) + if err != nil { + return err + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + _, err = conn.DeleteNetworkAclEntry(&ec2.DeleteNetworkAclEntryInput{ + NetworkAclId: aws.String(rs.Primary.Attributes["network_acl_id"]), + RuleNumber: aws.Int64(ruleNo), + Egress: aws.Bool(egress), + }) + if err != nil { + return fmt.Errorf("Error deleting Network ACL Rule (%s) in testAccCheckAWSNetworkAclRuleDelete: %s", rs.Primary.ID, err) + } + + return nil + } +} + const testAccAWSNetworkAclRuleBasicConfig = ` provider "aws" { region = "us-east-1" From 1da9a06a64e9722ca55838156e6af0c706bd4df6 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 17 Apr 2017 01:40:20 +0300 Subject: [PATCH 163/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d2921c9a..c05b05e8a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ BUG FIXES: * provider/aws: Increase default number of retries from 11 to 25 [GH-13673] * provider/aws: Use mutex & retry for WAF change operations [GH-13656] * provider/aws: Remove aws_vpc_dhcp_options if not found [GH-13610] + * provider/aws: Remove aws_network_acl_rule if not found [GH-13608] * provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650] * provider/google: Stop setting the id when project creation fails [GH-13644] * provider/openstack: Fix updating Ports [GH-13604] From 8d5fdeae571a53db716357cf12b07dd21c07b3f7 Mon Sep 17 00:00:00 2001 From: Joshua Spence Date: Mon, 17 Apr 2017 08:50:52 +1000 Subject: [PATCH 164/342] Add `name_prefix` support to `aws_cloudwatch_log_group` (#13273) --- .../aws/resource_aws_cloudwatch_log_group.go | 28 +++++++++-- .../resource_aws_cloudwatch_log_group_test.go | 48 +++++++++++++++++++ builtin/providers/aws/validators.go | 20 ++++++++ builtin/providers/aws/validators_test.go | 39 ++++++++++++++- .../aws/r/cloudwatch_log_group.html.markdown | 5 +- 5 files changed, 131 insertions(+), 9 deletions(-) diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_group.go b/builtin/providers/aws/resource_aws_cloudwatch_log_group.go index 7bbd30621..b0cafe8d3 100644 --- a/builtin/providers/aws/resource_aws_cloudwatch_log_group.go +++ b/builtin/providers/aws/resource_aws_cloudwatch_log_group.go @@ -4,6 +4,7 @@ import ( "fmt" "log" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/aws/aws-sdk-go/aws" @@ -24,10 +25,18 @@ func resourceAwsCloudWatchLogGroup() *schema.Resource { Schema: map[string]*schema.Schema{ "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: validateLogGroupName, + }, + "name_prefix": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, - ValidateFunc: validateLogGroupName, + ValidateFunc: validateLogGroupNamePrefix, }, "retention_in_days": { @@ -49,10 +58,19 @@ func resourceAwsCloudWatchLogGroup() *schema.Resource { func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cloudwatchlogsconn - log.Printf("[DEBUG] Creating CloudWatch Log Group: %s", d.Get("name").(string)) + var 
logGroupName string + if v, ok := d.GetOk("name"); ok { + logGroupName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + logGroupName = resource.PrefixedUniqueId(v.(string)) + } else { + logGroupName = resource.UniqueId() + } + + log.Printf("[DEBUG] Creating CloudWatch Log Group: %s", logGroupName) _, err := conn.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ - LogGroupName: aws.String(d.Get("name").(string)), + LogGroupName: aws.String(logGroupName), }) if err != nil { if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceAlreadyExistsException" { @@ -61,7 +79,7 @@ func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Creating CloudWatch Log Group failed: %s '%s'", err, d.Get("name")) } - d.SetId(d.Get("name").(string)) + d.SetId(logGroupName) log.Println("[INFO] CloudWatch Log Group created") diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_group_test.go b/builtin/providers/aws/resource_aws_cloudwatch_log_group_test.go index 5c59f8499..1e8a4ecb8 100644 --- a/builtin/providers/aws/resource_aws_cloudwatch_log_group_test.go +++ b/builtin/providers/aws/resource_aws_cloudwatch_log_group_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "regexp" "testing" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" @@ -30,6 +31,43 @@ func TestAccAWSCloudWatchLogGroup_basic(t *testing.T) { }) } +func TestAccAWSCloudWatchLogGroup_namePrefix(t *testing.T) { + var lg cloudwatchlogs.LogGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudWatchLogGroup_namePrefix, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.test", &lg), + resource.TestMatchResourceAttr("aws_cloudwatch_log_group.test", "name", regexp.MustCompile("^tf-test-")), + ), + }, + }, + }) +} + +func TestAccAWSCloudWatchLogGroup_generatedName(t *testing.T) { + var lg cloudwatchlogs.LogGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudWatchLogGroup_generatedName, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.test", &lg), + ), + }, + }, + }) +} + func TestAccAWSCloudWatchLogGroup_retentionPolicy(t *testing.T) { var lg cloudwatchlogs.LogGroup rInt := acctest.RandInt() @@ -256,3 +294,13 @@ resource "aws_cloudwatch_log_group" "charlie" { } `, rInt, rInt+1, rInt+2) } + +const testAccAWSCloudWatchLogGroup_namePrefix = ` +resource "aws_cloudwatch_log_group" "test" { + name_prefix = "tf-test-" +} +` + +const testAccAWSCloudWatchLogGroup_generatedName = ` +resource "aws_cloudwatch_log_group" "test" {} +` diff --git a/builtin/providers/aws/validators.go b/builtin/providers/aws/validators.go index e46095cde..7b692ad93 100644 --- a/builtin/providers/aws/validators.go +++ b/builtin/providers/aws/validators.go @@ -483,6 +483,26 @@ func validateLogGroupName(v interface{}, k string) (ws []string, errors []error) return } +func validateLogGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 483 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 483 characters: %q", k, value)) + } + + // 
http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogGroup.html + pattern := `^[\.\-_/#A-Za-z0-9]+$` + if !regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q isn't a valid log group name (alphanumeric characters, underscores,"+ + " hyphens, slashes, hash signs and dots are allowed): %q", + k, value)) + } + + return +} + func validateS3BucketLifecycleTimestamp(v interface{}, k string) (ws []string, errors []error) { value := v.(string) _, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", value)) diff --git a/builtin/providers/aws/validators_test.go b/builtin/providers/aws/validators_test.go index 0af2b49fa..e676ff880 100644 --- a/builtin/providers/aws/validators_test.go +++ b/builtin/providers/aws/validators_test.go @@ -410,7 +410,7 @@ func TestValidateLogGroupName(t *testing.T) { for _, v := range validNames { _, errors := validateLogGroupName(v, "name") if len(errors) != 0 { - t.Fatalf("%q should be a valid Log Metric Filter Transformation Name: %q", v, errors) + t.Fatalf("%q should be a valid Log Group name: %q", v, errors) } } @@ -427,7 +427,42 @@ func TestValidateLogGroupName(t *testing.T) { for _, v := range invalidNames { _, errors := validateLogGroupName(v, "name") if len(errors) == 0 { - t.Fatalf("%q should be an invalid Log Metric Filter Transformation Name", v) + t.Fatalf("%q should be an invalid Log Group name", v) + } + } +} + +func TestValidateLogGroupNamePrefix(t *testing.T) { + validNames := []string{ + "ValidLogGroupName", + "ValidLogGroup.Name", + "valid/Log-group", + "1234", + "YadaValid#0123", + "Also_valid-name", + strings.Repeat("W", 483), + } + for _, v := range validNames { + _, errors := validateLogGroupNamePrefix(v, "name_prefix") + if len(errors) != 0 { + t.Fatalf("%q should be a valid Log Group name prefix: %q", v, errors) + } + } + + invalidNames := []string{ + "Here is a name with: colon", + "and here is another * invalid name", + "also $ invalid", + "This . is also %% invalid@!)+(", + "*", + "", + // length > 483 + strings.Repeat("W", 484), + } + for _, v := range invalidNames { + _, errors := validateLogGroupNamePrefix(v, "name_prefix") + if len(errors) == 0 { + t.Fatalf("%q should be an invalid Log Group name prefix", v) } } } diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown index 80b3a420c..bfa9fc4ad 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown @@ -27,7 +27,8 @@ resource "aws_cloudwatch_log_group" "yada" { The following arguments are supported: -* `name` - (Required) The name of the log group +* `name` - (Optional, Forces new resource) The name of the log group. If omitted, Terraform will assign a random, unique name. +* `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `retention_in_days` - (Optional) Specifies the number of days you want to retain log events in the specified log group. * `tags` - (Optional) A mapping of tags to assign to the resource. @@ -45,4 +46,4 @@ Cloudwatch Log Groups can be imported using the `name`, e.g. 
``` $ terraform import aws_cloudwatch_log_group.test_group yada -``` \ No newline at end of file +``` From 7480454e3127bcbb611d3f58abf5523a6ade6b2e Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 17 Apr 2017 01:51:38 +0300 Subject: [PATCH 165/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c05b05e8a..103893d45 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ FEATURES: IMPROVEMENTS: * state/remote/swift: Support Openstack request logging [GH-13583] * provider/aws: Add an option to skip getting the supported EC2 platforms [GH-13672] + * provider/aws: Add `name_prefix` support to `aws_cloudwatch_log_group` [GH-13273] * provider/azurerm: VM Scale Sets - import support [GH-13464] * provider/google: `google_compute_address` and `google_compute_global_address` are now importable [GH-13270] From 8927ad5dce632de75d553e20491673bdd58a1210 Mon Sep 17 00:00:00 2001 From: Kent Wang Date: Mon, 17 Apr 2017 06:52:39 +0800 Subject: [PATCH 166/342] provider/alicloud: Fix allocate public ip error (#13267) (#13268) Wait for instance to be in STOPPED or RUNNING state before invoking AllocatePublicIP API. * provider/alicloud: Wait for instance state before allocate public ip * provider/alicloud: Fix test `TestAccAlicloudInstance_associatePublicIP` * provider/alicloud: Update alicloud_instance document Fixes: #13267 --- .../alicloud/resource_alicloud_instance.go | 22 +++++++++---------- .../resource_alicloud_instance_test.go | 15 ++++++++++++- .../alicloud/r/instance.html.markdown | 6 ++--- 3 files changed, 28 insertions(+), 15 deletions(-) diff --git a/builtin/providers/alicloud/resource_alicloud_instance.go b/builtin/providers/alicloud/resource_alicloud_instance.go index b3cff7204..36297afbe 100644 --- a/builtin/providers/alicloud/resource_alicloud_instance.go +++ b/builtin/providers/alicloud/resource_alicloud_instance.go @@ -194,6 +194,12 @@ func resourceAliyunInstanceCreate(d *schema.ResourceData, meta interface{}) erro //d.Set("system_disk_category", d.Get("system_disk_category")) //d.Set("system_disk_size", d.Get("system_disk_size")) + // after instance created, its status is pending, + // so we need to wait it become to stopped and then start it + if err := conn.WaitForInstance(d.Id(), ecs.Stopped, defaultTimeout); err != nil { + log.Printf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Stopped, err) + } + if d.Get("allocate_public_ip").(bool) { _, err := conn.AllocatePublicIpAddress(d.Id()) if err != nil { @@ -201,12 +207,6 @@ func resourceAliyunInstanceCreate(d *schema.ResourceData, meta interface{}) erro } } - // after instance created, its status is pending, - // so we need to wait it become to stopped and then start it - if err := conn.WaitForInstance(d.Id(), ecs.Stopped, defaultTimeout); err != nil { - log.Printf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Stopped, err) - } - if err := conn.StartInstance(d.Id()); err != nil { return fmt.Errorf("Start instance got error: %#v", err) } @@ -253,6 +253,11 @@ func resourceAliyunRunInstance(d *schema.ResourceData, meta interface{}) error { d.Set("system_disk_category", d.Get("system_disk_category")) d.Set("system_disk_size", d.Get("system_disk_size")) + // after instance created, its status change from pending, starting to running + if err := conn.WaitForInstanceAsyn(d.Id(), ecs.Running, defaultTimeout); err != nil { + log.Printf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Running, err) + } + if d.Get("allocate_public_ip").(bool) { _, err := 
conn.AllocatePublicIpAddress(d.Id()) if err != nil { @@ -260,11 +265,6 @@ func resourceAliyunRunInstance(d *schema.ResourceData, meta interface{}) error { } } - // after instance created, its status change from pending, starting to running - if err := conn.WaitForInstanceAsyn(d.Id(), ecs.Running, defaultTimeout); err != nil { - log.Printf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Running, err) - } - return resourceAliyunInstanceUpdate(d, meta) } diff --git a/builtin/providers/alicloud/resource_alicloud_instance_test.go b/builtin/providers/alicloud/resource_alicloud_instance_test.go index 4e8f0c716..3547d4693 100644 --- a/builtin/providers/alicloud/resource_alicloud_instance_test.go +++ b/builtin/providers/alicloud/resource_alicloud_instance_test.go @@ -4,12 +4,13 @@ import ( "fmt" "testing" + "log" + "github.com/denverdino/aliyungo/common" "github.com/denverdino/aliyungo/ecs" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" - "log" ) func TestAccAlicloudInstance_basic(t *testing.T) { @@ -456,6 +457,17 @@ func TestAccAlicloudInstance_associatePublicIP(t *testing.T) { } } + testCheckPublicIP := func() resource.TestCheckFunc { + return func(*terraform.State) error { + publicIP := instance.PublicIpAddress.IpAddress[0] + if publicIP == "" { + return fmt.Errorf("can't get public IP") + } + + return nil + } + } + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) @@ -469,6 +481,7 @@ func TestAccAlicloudInstance_associatePublicIP(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists("alicloud_instance.foo", &instance), testCheckPrivateIP(), + testCheckPublicIP(), ), }, }, diff --git a/website/source/docs/providers/alicloud/r/instance.html.markdown b/website/source/docs/providers/alicloud/r/instance.html.markdown index 201beab3d..cb038ed1a 100644 --- a/website/source/docs/providers/alicloud/r/instance.html.markdown +++ b/website/source/docs/providers/alicloud/r/instance.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a ECS instance resource. --- -# alicloud_ecs +# alicloud\_instance Provides a ECS instance resource. @@ -22,7 +22,7 @@ resource "alicloud_security_group" "classic" { resource "alicloud_instance" "classic" { # cn-beijing availability_zone = "cn-beijing-b" - security_group_id = "${alicloud_security_group.classic.id}" + security_groups = ["${alicloud_security_group.classic.*.id}"] allocate_public_ip = "true" @@ -57,7 +57,7 @@ The following arguments are supported: * `image_id` - (Required) The Image to use for the instance. * `instance_type` - (Required) The type of instance to start. * `io_optimized` - (Required) Valid values are `none`, `optimized`, If `optimized`, the launched ECS instance will be I/O optimized. -* `security_group_ids` - (Optional) A list of security group ids to associate with. +* `security_groups` - (Optional) A list of security group ids to associate with. * `availability_zone` - (Optional) The Zone to start the instance in. * `instance_name` - (Optional) The name of the ECS. This instance_name can have a string of 2 to 128 characters, must contain only alphanumeric characters or hyphens, such as "-",".","\_", and must not begin or end with a hyphen, and must not begin with http:// or https://. If not specified, Terraform will autogenerate a default name is `ECS-Instance`. 
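
The core of this fix is the reordering inside `resourceAliyunInstanceCreate`: the provider now waits for the freshly created instance to leave the transient "pending" state before asking the API for a public IP. A minimal sketch of that sequence, using the same aliyungo client calls as the resource code (the `example` package, the `allocatePublicIPAfterCreate` helper name, and the `defaultTimeout` value are assumptions for illustration):

```
package example

import (
	"fmt"

	"github.com/denverdino/aliyungo/ecs"
)

// Assumed timeout in seconds; the provider defines its own constant.
const defaultTimeout = 120

// allocatePublicIPAfterCreate mirrors the create-time order this patch
// enforces: wait for the instance to settle, allocate the public IP,
// then start the instance.
func allocatePublicIPAfterCreate(conn *ecs.Client, instanceID string) error {
	// A newly created instance moves from "pending" to "stopped" first.
	if err := conn.WaitForInstance(instanceID, ecs.Stopped, defaultTimeout); err != nil {
		return fmt.Errorf("waiting for instance %s: %v", instanceID, err)
	}

	// Allocating the IP only after the wait avoids the create-time failure.
	if _, err := conn.AllocatePublicIpAddress(instanceID); err != nil {
		return fmt.Errorf("allocating public IP for %s: %v", instanceID, err)
	}

	// Finally start the instance, as the resource code does.
	return conn.StartInstance(instanceID)
}
```

`resourceAliyunRunInstance` gets the same reordering, except that it waits asynchronously for the `Running` state before allocating the IP.
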
From 1f75a5f3722f89da7c97773f72996f3e6395b610 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 17 Apr 2017 01:53:09 +0300 Subject: [PATCH 167/342] Update CHANGELOG.md --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 103893d45..32eb8c5ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,8 @@ BUG FIXES: * core: Add the close provider/provisioner transformers back [GH-13102] * core: Fix a crash condition by improving the flatmap.Expand() logic [GH-13541] - * provider/alicloud: Fix create PrePaid instance (#13661) [GH-13662] + * provider/alicloud: Fix create PrePaid instance [GH-13662] + * provider/alicloud: Fix allocate public ip error [GH-13268] * provider/aws: Fix DB Parameter Group Name [GH-13279] * provider/aws: Increase default number of retries from 11 to 25 [GH-13673] * provider/aws: Use mutex & retry for WAF change operations [GH-13656] From b8b896e11b5a43e2ffc85eabea01a722065dee3d Mon Sep 17 00:00:00 2001 From: Paul Tyng Date: Sun, 16 Apr 2017 18:59:11 -0400 Subject: [PATCH 168/342] Update go-newrelic vendor and use new field (#13158) Fixes #12972 --- .../resource_newrelic_alert_condition.go | 6 ++ .../resource_newrelic_alert_condition_test.go | 38 ++++---- .../go-newrelic/api/alert_channels.go | 4 +- .../go-newrelic/api/alert_conditions.go | 4 + .../go-newrelic/api/alert_policy_channels.go | 2 + .../paultyng/go-newrelic/api/applications.go | 1 + .../paultyng/go-newrelic/api/client.go | 2 + .../go-newrelic/api/component_metric_data.go | 45 ++++++++++ .../go-newrelic/api/component_metrics.go | 40 +++++++++ .../paultyng/go-newrelic/api/components.go | 51 +++++++++++ .../paultyng/go-newrelic/api/labels.go | 1 + .../paultyng/go-newrelic/api/plugins.go | 51 +++++++++++ .../paultyng/go-newrelic/api/types.go | 89 ++++++++++++++++++- vendor/vendor.json | 6 +- .../newrelic/r/alert_condition.html.markdown | 1 + 15 files changed, 320 insertions(+), 21 deletions(-) create mode 100644 vendor/github.com/paultyng/go-newrelic/api/component_metric_data.go create mode 100644 vendor/github.com/paultyng/go-newrelic/api/component_metrics.go create mode 100644 vendor/github.com/paultyng/go-newrelic/api/components.go create mode 100644 vendor/github.com/paultyng/go-newrelic/api/plugins.go diff --git a/builtin/providers/newrelic/resource_newrelic_alert_condition.go b/builtin/providers/newrelic/resource_newrelic_alert_condition.go index db8ba3c9c..1021d9af2 100644 --- a/builtin/providers/newrelic/resource_newrelic_alert_condition.go +++ b/builtin/providers/newrelic/resource_newrelic_alert_condition.go @@ -106,6 +106,10 @@ func resourceNewRelicAlertCondition() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "condition_scope": { + Type: schema.TypeString, + Optional: true, + }, "term": { Type: schema.TypeList, Elem: &schema.Resource{ @@ -186,6 +190,7 @@ func buildAlertConditionStruct(d *schema.ResourceData) *newrelic.AlertCondition Metric: d.Get("metric").(string), Terms: terms, PolicyID: d.Get("policy_id").(int), + Scope: d.Get("condition_scope").(string), } if attr, ok := d.GetOk("runbook_url"); ok { @@ -226,6 +231,7 @@ func readAlertConditionStruct(condition *newrelic.AlertCondition, d *schema.Reso d.Set("type", condition.Type) d.Set("metric", condition.Metric) d.Set("runbook_url", condition.RunbookURL) + d.Set("condition_scope", condition.Scope) d.Set("user_defined_metric", condition.UserDefined.Metric) d.Set("user_defined_value_function", condition.UserDefined.ValueFunction) if err := d.Set("entities", 
entities); err != nil { diff --git a/builtin/providers/newrelic/resource_newrelic_alert_condition_test.go b/builtin/providers/newrelic/resource_newrelic_alert_condition_test.go index b9c608a83..a46938d82 100644 --- a/builtin/providers/newrelic/resource_newrelic_alert_condition_test.go +++ b/builtin/providers/newrelic/resource_newrelic_alert_condition_test.go @@ -29,8 +29,6 @@ func TestAccNewRelicAlertCondition_Basic(t *testing.T) { "newrelic_alert_condition.foo", "runbook_url", "https://foo.example.com"), resource.TestCheckResourceAttr( "newrelic_alert_condition.foo", "entities.#", "1"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "entities.0", "12345"), resource.TestCheckResourceAttr( "newrelic_alert_condition.foo", "term.#", "1"), resource.TestCheckResourceAttr( @@ -55,8 +53,6 @@ func TestAccNewRelicAlertCondition_Basic(t *testing.T) { "newrelic_alert_condition.foo", "runbook_url", "https://bar.example.com"), resource.TestCheckResourceAttr( "newrelic_alert_condition.foo", "entities.#", "1"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "entities.0", "67890"), resource.TestCheckResourceAttr( "newrelic_alert_condition.foo", "term.#", "1"), resource.TestCheckResourceAttr( @@ -136,6 +132,10 @@ func testAccCheckNewRelicAlertConditionExists(n string) resource.TestCheckFunc { func testAccCheckNewRelicAlertConditionConfig(rName string) string { return fmt.Sprintf(` +data "newrelic_application" "app" { + name = "%[2]s" +} + resource "newrelic_alert_policy" "foo" { name = "tf-test-%[1]s" } @@ -143,11 +143,12 @@ resource "newrelic_alert_policy" "foo" { resource "newrelic_alert_condition" "foo" { policy_id = "${newrelic_alert_policy.foo.id}" - name = "tf-test-%[1]s" - type = "apm_app_metric" - entities = ["12345"] - metric = "apdex" - runbook_url = "https://foo.example.com" + name = "tf-test-%[1]s" + type = "apm_app_metric" + entities = ["${data.newrelic_application.app.id}"] + metric = "apdex" + runbook_url = "https://foo.example.com" + condition_scope = "application" term { duration = 5 @@ -157,11 +158,15 @@ resource "newrelic_alert_condition" "foo" { time_function = "all" } } -`, rName) +`, rName, testAccExpectedApplicationName) } func testAccCheckNewRelicAlertConditionConfigUpdated(rName string) string { return fmt.Sprintf(` +data "newrelic_application" "app" { + name = "%[2]s" +} + resource "newrelic_alert_policy" "foo" { name = "tf-test-updated-%[1]s" } @@ -169,11 +174,12 @@ resource "newrelic_alert_policy" "foo" { resource "newrelic_alert_condition" "foo" { policy_id = "${newrelic_alert_policy.foo.id}" - name = "tf-test-updated-%[1]s" - type = "apm_app_metric" - entities = ["67890"] - metric = "apdex" - runbook_url = "https://bar.example.com" + name = "tf-test-updated-%[1]s" + type = "apm_app_metric" + entities = ["${data.newrelic_application.app.id}"] + metric = "apdex" + runbook_url = "https://bar.example.com" + condition_scope = "application" term { duration = 10 @@ -183,7 +189,7 @@ resource "newrelic_alert_condition" "foo" { time_function = "all" } } -`, rName) +`, rName, testAccExpectedApplicationName) } // TODO: const testAccCheckNewRelicAlertConditionConfigMulti = ` diff --git a/vendor/github.com/paultyng/go-newrelic/api/alert_channels.go b/vendor/github.com/paultyng/go-newrelic/api/alert_channels.go index edb078c28..c4e6b632a 100644 --- a/vendor/github.com/paultyng/go-newrelic/api/alert_channels.go +++ b/vendor/github.com/paultyng/go-newrelic/api/alert_channels.go @@ -52,6 +52,7 @@ func (c *Client) ListAlertChannels() ([]AlertChannel, 
error) { return c.queryAlertChannels() } +// CreateAlertChannel allows you to create an alert channel with the specified data and links. func (c *Client) CreateAlertChannel(channel AlertChannel) (*AlertChannel, error) { // TODO: support attaching policy ID's here? // qs := map[string]string{ @@ -59,7 +60,7 @@ func (c *Client) CreateAlertChannel(channel AlertChannel) (*AlertChannel, error) // } if len(channel.Links.PolicyIDs) > 0 { - return nil, fmt.Errorf("You cannot create an alert channel with policy IDs, you must attach polidy IDs after creation.") + return nil, fmt.Errorf("cannot create an alert channel with policy IDs, you must attach polidy IDs after creation") } req := struct { @@ -80,6 +81,7 @@ func (c *Client) CreateAlertChannel(channel AlertChannel) (*AlertChannel, error) return &resp.Channels[0], nil } +// DeleteAlertChannel deletes the alert channel with the specified ID. func (c *Client) DeleteAlertChannel(id int) error { u := &url.URL{Path: fmt.Sprintf("/alerts_channels/%v.json", id)} _, err := c.Do("DELETE", u.String(), nil, nil) diff --git a/vendor/github.com/paultyng/go-newrelic/api/alert_conditions.go b/vendor/github.com/paultyng/go-newrelic/api/alert_conditions.go index 45c5a9f89..6dd0ad15e 100644 --- a/vendor/github.com/paultyng/go-newrelic/api/alert_conditions.go +++ b/vendor/github.com/paultyng/go-newrelic/api/alert_conditions.go @@ -41,6 +41,7 @@ func (c *Client) queryAlertConditions(policyID int) ([]AlertCondition, error) { return conditions, nil } +// GetAlertCondition gets information about an alert condition given an ID and policy ID. func (c *Client) GetAlertCondition(policyID int, id int) (*AlertCondition, error) { conditions, err := c.queryAlertConditions(policyID) if err != nil { @@ -61,6 +62,7 @@ func (c *Client) ListAlertConditions(policyID int) ([]AlertCondition, error) { return c.queryAlertConditions(policyID) } +// CreateAlertCondition creates an alert condition given the passed configuration. func (c *Client) CreateAlertCondition(condition AlertCondition) (*AlertCondition, error) { policyID := condition.PolicyID @@ -85,6 +87,7 @@ func (c *Client) CreateAlertCondition(condition AlertCondition) (*AlertCondition return &resp.Condition, nil } +// UpdateAlertCondition updates an alert condition with the specified changes. func (c *Client) UpdateAlertCondition(condition AlertCondition) (*AlertCondition, error) { policyID := condition.PolicyID id := condition.ID @@ -110,6 +113,7 @@ func (c *Client) UpdateAlertCondition(condition AlertCondition) (*AlertCondition return &resp.Condition, nil } +// DeleteAlertCondition removes the alert condition given the specified ID and policy ID. func (c *Client) DeleteAlertCondition(policyID int, id int) error { u := &url.URL{Path: fmt.Sprintf("/alerts_conditions/%v.json", id)} _, err := c.Do("DELETE", u.String(), nil, nil) diff --git a/vendor/github.com/paultyng/go-newrelic/api/alert_policy_channels.go b/vendor/github.com/paultyng/go-newrelic/api/alert_policy_channels.go index 3c822131f..8a67e68e9 100644 --- a/vendor/github.com/paultyng/go-newrelic/api/alert_policy_channels.go +++ b/vendor/github.com/paultyng/go-newrelic/api/alert_policy_channels.go @@ -6,6 +6,7 @@ import ( "strconv" ) +// UpdateAlertPolicyChannels updates a policy by adding the specified notification channels. 
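+// The channel IDs are converted to strings before being sent with the request.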
func (c *Client) UpdateAlertPolicyChannels(policyID int, channelIDs []int) error { channelIDStrings := make([]string, len(channelIDs)) @@ -30,6 +31,7 @@ func (c *Client) UpdateAlertPolicyChannels(policyID int, channelIDs []int) error return err } +// DeleteAlertPolicyChannel deletes a notification channel from an alert policy. func (c *Client) DeleteAlertPolicyChannel(policyID int, channelID int) error { reqURL, err := url.Parse("/alerts_policy_channels.json") if err != nil { diff --git a/vendor/github.com/paultyng/go-newrelic/api/applications.go b/vendor/github.com/paultyng/go-newrelic/api/applications.go index 54af0bda2..dc3bdf2e0 100644 --- a/vendor/github.com/paultyng/go-newrelic/api/applications.go +++ b/vendor/github.com/paultyng/go-newrelic/api/applications.go @@ -53,6 +53,7 @@ func (c *Client) queryApplications(filters applicationsFilters) ([]Application, return applications, nil } +// ListApplications lists all the applications you have access to. func (c *Client) ListApplications() ([]Application, error) { return c.queryApplications(applicationsFilters{}) } diff --git a/vendor/github.com/paultyng/go-newrelic/api/client.go b/vendor/github.com/paultyng/go-newrelic/api/client.go index e46d6823c..936ffef1a 100644 --- a/vendor/github.com/paultyng/go-newrelic/api/client.go +++ b/vendor/github.com/paultyng/go-newrelic/api/client.go @@ -13,6 +13,7 @@ type Client struct { RestyClient *resty.Client } +// ErrorResponse represents an error response from New Relic. type ErrorResponse struct { Detail *ErrorDetail `json:"error,omitempty"` } @@ -24,6 +25,7 @@ func (e *ErrorResponse) Error() string { return "Unknown error" } +// ErrorDetail represents the details of an ErrorResponse from New Relic. type ErrorDetail struct { Title string `json:"title,omitempty"` } diff --git a/vendor/github.com/paultyng/go-newrelic/api/component_metric_data.go b/vendor/github.com/paultyng/go-newrelic/api/component_metric_data.go new file mode 100644 index 000000000..a8d743e9b --- /dev/null +++ b/vendor/github.com/paultyng/go-newrelic/api/component_metric_data.go @@ -0,0 +1,45 @@ +package api + +import ( + "fmt" + "net/url" +) + +func (c *Client) queryComponentMetricData(componentID int, names []string) ([]Metric, error) { + data := []Metric{} + + reqURL, err := url.Parse(fmt.Sprintf("/components/%v/metrics/data.json", componentID)) + if err != nil { + return nil, err + } + + qs := reqURL.Query() + for _, name := range names { + qs.Add("names[]", name) + } + reqURL.RawQuery = qs.Encode() + + nextPath := reqURL.String() + + for nextPath != "" { + resp := struct { + MetricData struct { + Metrics []Metric `json:"metrics"` + } `json:"metric_data,omitempty"` + }{} + + nextPath, err = c.Do("GET", nextPath, nil, &resp) + if err != nil { + return nil, err + } + + data = append(data, resp.MetricData.Metrics...) + } + + return data, nil +} + +// ListComponentMetricData lists all the metric data for the specified component ID and metric names. 
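+// All pages of results are fetched and concatenated before returning.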
+func (c *Client) ListComponentMetricData(componentID int, names []string) ([]Metric, error) {
+	return c.queryComponentMetricData(componentID, names)
+}
diff --git a/vendor/github.com/paultyng/go-newrelic/api/component_metrics.go b/vendor/github.com/paultyng/go-newrelic/api/component_metrics.go
new file mode 100644
index 000000000..60b99ca9a
--- /dev/null
+++ b/vendor/github.com/paultyng/go-newrelic/api/component_metrics.go
@@ -0,0 +1,40 @@
+package api
+
+import (
+	"fmt"
+	"net/url"
+)
+
+func (c *Client) queryComponentMetrics(componentID int) ([]ComponentMetric, error) {
+	metrics := []ComponentMetric{}
+
+	reqURL, err := url.Parse(fmt.Sprintf("/components/%v/metrics.json", componentID))
+	if err != nil {
+		return nil, err
+	}
+
+	qs := reqURL.Query()
+	reqURL.RawQuery = qs.Encode()
+
+	nextPath := reqURL.String()
+
+	for nextPath != "" {
+		resp := struct {
+			Metrics []ComponentMetric `json:"metrics,omitempty"`
+		}{}
+
+		nextPath, err = c.Do("GET", nextPath, nil, &resp)
+		if err != nil {
+			return nil, err
+		}
+
+		metrics = append(metrics, resp.Metrics...)
+	}
+
+	return metrics, nil
+}
+
+// ListComponentMetrics lists all the component metrics for the specified component ID.
+func (c *Client) ListComponentMetrics(componentID int) ([]ComponentMetric, error) {
+	return c.queryComponentMetrics(componentID)
+}
diff --git a/vendor/github.com/paultyng/go-newrelic/api/components.go b/vendor/github.com/paultyng/go-newrelic/api/components.go
new file mode 100644
index 000000000..a6acfc375
--- /dev/null
+++ b/vendor/github.com/paultyng/go-newrelic/api/components.go
@@ -0,0 +1,51 @@
+package api
+
+import (
+	"net/url"
+	"strconv"
+)
+
+type componentsFilters struct {
+	PluginID int
+	IDs      []int
+}
+
+func (c *Client) queryComponents(filters componentsFilters) ([]Component, error) {
+	components := []Component{}
+
+	reqURL, err := url.Parse("/components.json")
+	if err != nil {
+		return nil, err
+	}
+
+	qs := reqURL.Query()
+	qs.Set("filter[plugin_id]", strconv.Itoa(filters.PluginID))
+	for _, id := range filters.IDs {
+		qs.Add("filter[ids]", strconv.Itoa(id))
+	}
+	reqURL.RawQuery = qs.Encode()
+
+	nextPath := reqURL.String()
+
+	for nextPath != "" {
+		resp := struct {
+			Components []Component `json:"components,omitempty"`
+		}{}
+
+		nextPath, err = c.Do("GET", nextPath, nil, &resp)
+		if err != nil {
+			return nil, err
+		}
+
+		components = append(components, resp.Components...)
+	}
+
+	return components, nil
+}
+
+// ListComponents lists all the components for the specified plugin ID.
+func (c *Client) ListComponents(pluginID int) ([]Component, error) {
+	return c.queryComponents(componentsFilters{
+		PluginID: pluginID,
+	})
+}
diff --git a/vendor/github.com/paultyng/go-newrelic/api/labels.go b/vendor/github.com/paultyng/go-newrelic/api/labels.go
index 933fd9607..7db1f560c 100644
--- a/vendor/github.com/paultyng/go-newrelic/api/labels.go
+++ b/vendor/github.com/paultyng/go-newrelic/api/labels.go
@@ -31,6 +31,7 @@ func (c *Client) queryLabels() ([]Label, error) {
 	return labels, nil
 }
 
+// GetLabel gets the label for the specified key.
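+// It searches the full list of labels returned by queryLabels.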
func (c *Client) GetLabel(key string) (*Label, error) { labels, err := c.queryLabels() if err != nil { diff --git a/vendor/github.com/paultyng/go-newrelic/api/plugins.go b/vendor/github.com/paultyng/go-newrelic/api/plugins.go new file mode 100644 index 000000000..5cf917113 --- /dev/null +++ b/vendor/github.com/paultyng/go-newrelic/api/plugins.go @@ -0,0 +1,51 @@ +package api + +import ( + "net/url" + "strconv" +) + +type pluginsFilters struct { + GUID *string + IDs []int +} + +func (c *Client) queryPlugins(filters pluginsFilters) ([]Plugin, error) { + plugins := []Plugin{} + + reqURL, err := url.Parse("/plugins.json") + if err != nil { + return nil, err + } + + qs := reqURL.Query() + if filters.GUID != nil { + qs.Set("filter[guid]", *filters.GUID) + } + for _, id := range filters.IDs { + qs.Add("filter[ids]", strconv.Itoa(id)) + } + reqURL.RawQuery = qs.Encode() + + nextPath := reqURL.String() + + for nextPath != "" { + resp := struct { + Plugins []Plugin `json:"plugins,omitempty"` + }{} + + nextPath, err = c.Do("GET", nextPath, nil, &resp) + if err != nil { + return nil, err + } + + plugins = append(plugins, resp.Plugins...) + } + + return plugins, nil +} + +// ListPlugins lists all the plugins you have access to. +func (c *Client) ListPlugins() ([]Plugin, error) { + return c.queryPlugins(pluginsFilters{}) +} diff --git a/vendor/github.com/paultyng/go-newrelic/api/types.go b/vendor/github.com/paultyng/go-newrelic/api/types.go index df5f88773..cb7bbbf0a 100644 --- a/vendor/github.com/paultyng/go-newrelic/api/types.go +++ b/vendor/github.com/paultyng/go-newrelic/api/types.go @@ -3,6 +3,7 @@ package api import "errors" var ( + // ErrNotFound is returned when the resource was not found in New Relic. ErrNotFound = errors.New("newrelic: Resource not found") ) @@ -57,7 +58,8 @@ type AlertCondition struct { Metric string `json:"metric,omitempty"` RunbookURL string `json:"runbook_url,omitempty"` Terms []AlertConditionTerm `json:"terms,omitempty"` - UserDefined AlertConditionUserDefined `json:"uder_defined,omitempty"` + UserDefined AlertConditionUserDefined `json:"user_defined,omitempty"` + Scope string `json:"condition_scope,omitempty"` } // AlertChannelLinks represent the links between policies and alert channels @@ -74,6 +76,7 @@ type AlertChannel struct { Links AlertChannelLinks `json:"links,omitempty"` } +// ApplicationSummary represents performance information about a New Relic application. type ApplicationSummary struct { ResponseTime float64 `json:"response_time"` Throughput float64 `json:"throughput"` @@ -85,6 +88,7 @@ type ApplicationSummary struct { ConcurrentInstanceCount int `json:"concurrent_instance_count"` } +// ApplicationEndUserSummary represents performance information about a New Relic application. type ApplicationEndUserSummary struct { ResponseTime float64 `json:"response_time"` Throughput float64 `json:"throughput"` @@ -92,6 +96,7 @@ type ApplicationEndUserSummary struct { ApdexScore float64 `json:"apdex_score"` } +// ApplicationSettings represents some of the settings of a New Relic application. type ApplicationSettings struct { AppApdexThreshold float64 `json:"app_apdex_threshold,omitempty"` EndUserApdexThreshold float64 `json:"end_user_apdex_threshold,omitempty"` @@ -99,6 +104,7 @@ type ApplicationSettings struct { UseServerSideConfig bool `json:"use_server_side_config,omitempty"` } +// ApplicationLinks represents all the links for a New Relic application. 
type ApplicationLinks struct {
 	ServerIDs []int `json:"servers,omitempty"`
 	HostIDs   []int `json:"application_hosts,omitempty"`
@@ -106,6 +112,7 @@ type ApplicationLinks struct {
 	AlertPolicyID int   `json:"alert_policy"`
 }
 
+// Application represents information about a New Relic application.
 type Application struct {
 	ID   int    `json:"id,omitempty"`
 	Name string `json:"name,omitempty"`
@@ -118,3 +125,83 @@ type Application struct {
 	Settings ApplicationSettings `json:"settings,omitempty"`
 	Links    ApplicationLinks    `json:"links,omitempty"`
 }
+
+// PluginDetails represents information about a New Relic plugin.
+type PluginDetails struct {
+	Description           int    `json:"description"`
+	IsPublic              string `json:"is_public"`
+	CreatedAt             string `json:"created_at,omitempty"`
+	UpdatedAt             string `json:"updated_at,omitempty"`
+	LastPublishedAt       string `json:"last_published_at,omitempty"`
+	HasUnpublishedChanges bool   `json:"has_unpublished_changes"`
+	BrandingImageURL      string `json:"branding_image_url"`
+	UpgradedAt            string `json:"upgraded_at,omitempty"`
+	ShortName             string `json:"short_name"`
+	PublisherAboutURL     string `json:"publisher_about_url"`
+	PublisherSupportURL   string `json:"publisher_support_url"`
+	DownloadURL           string `json:"download_url"`
+	FirstEditedAt         string `json:"first_edited_at,omitempty"`
+	LastEditedAt          string `json:"last_edited_at,omitempty"`
+	FirstPublishedAt      string `json:"first_published_at,omitempty"`
+	PublishedVersion      string `json:"published_version"`
+}
+
+// MetricThreshold represents the different thresholds for a metric in an alert.
+type MetricThreshold struct {
+	Caution  float64 `json:"caution"`
+	Critical float64 `json:"critical"`
+}
+
+// MetricValue represents the observed value of a metric.
+type MetricValue struct {
+	Raw       float64 `json:"raw"`
+	Formatted string  `json:"formatted"`
+}
+
+// MetricTimeslice represents the values of a metric over a given time.
+type MetricTimeslice struct {
+	From   string                 `json:"from,omitempty"`
+	To     string                 `json:"to,omitempty"`
+	Values map[string]interface{} `json:"values,omitempty"`
+}
+
+// Metric represents data for a specific metric.
+type Metric struct {
+	Name       string            `json:"name"`
+	Timeslices []MetricTimeslice `json:"timeslices"`
+}
+
+// SummaryMetric represents summary information for a specific metric.
+type SummaryMetric struct {
+	ID            int             `json:"id"`
+	Name          string          `json:"name"`
+	Metric        string          `json:"metric"`
+	ValueFunction string          `json:"value_function"`
+	Thresholds    MetricThreshold `json:"thresholds"`
+	Values        MetricValue     `json:"values"`
+}
+
+// Plugin represents information about a New Relic plugin.
+type Plugin struct {
+	ID                  int             `json:"id"`
+	Name                string          `json:"name,omitempty"`
+	GUID                string          `json:"guid,omitempty"`
+	Publisher           string          `json:"publisher,omitempty"`
+	ComponentAgentCount int             `json:"component_agent_count"`
+	Details             PluginDetails   `json:"details"`
+	SummaryMetrics      []SummaryMetric `json:"summary_metrics"`
+}
+
+// Component represents information about a New Relic component.
+type Component struct {
+	ID             int             `json:"id"`
+	Name           string          `json:"name,omitempty"`
+	HealthStatus   string          `json:"health_status,omitempty"`
+	SummaryMetrics []SummaryMetric `json:"summary_metrics"`
+}
+
+// ComponentMetric represents metric information for a specific component.
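+// Its Values field holds the names of the values recorded for the metric.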
+type ComponentMetric struct { + Name string `json:"name,omitempty"` + Values []string `json:"values"` +} diff --git a/vendor/vendor.json b/vendor/vendor.json index e5c2a1fc9..f15250981 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -2733,10 +2733,10 @@ "revisionTime": "2016-08-11T16:27:25Z" }, { - "checksumSHA1": "lOEkLP94OsQSLFp+38rY1GjnMtk=", + "checksumSHA1": "SUK6xOTZDwljlX/AoHAmgoz0e1E=", "path": "github.com/paultyng/go-newrelic/api", - "revision": "81a8e05b0e494285f1322f99f3c6f93c8f1192b1", - "revisionTime": "2016-11-29T00:49:55Z" + "revision": "5fbf16b273dd4b544c9588450c58711d9f46f912", + "revisionTime": "2017-03-27T18:23:21Z" }, { "checksumSHA1": "mUb0GqsJK4UDh3Kx8TobjzvDUG4=", diff --git a/website/source/docs/providers/newrelic/r/alert_condition.html.markdown b/website/source/docs/providers/newrelic/r/alert_condition.html.markdown index 27371e526..6b41f92e9 100644 --- a/website/source/docs/providers/newrelic/r/alert_condition.html.markdown +++ b/website/source/docs/providers/newrelic/r/alert_condition.html.markdown @@ -48,6 +48,7 @@ The following arguments are supported: * `entities` - (Required) The instance IDS associated with this condition. * `metric` - (Required) The metric field accepts parameters based on the `type` set. * `runbook_url` - (Optional) Runbook URL to display in notifications. + * `condition_scope` - (Optional) `instance` or `application`. This is required if you are using the JVM plugin in New Relic. * `term` - (Required) A list of terms for this condition. See [Terms](#terms) below for details. * `user_defined_metric` - (Optional) A custom metric to be evaluated. * `user_defined_value_function` - (Optional) One of: `average`, `min`, `max`, `total`, or `sample_size`. From 82235877d6c81c54291f714b5a074bb38af0468d Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 17 Apr 2017 01:59:50 +0300 Subject: [PATCH 169/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 32eb8c5ad..f9da72124 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ BUG FIXES: * provider/aws: Remove aws_network_acl_rule if not found [GH-13608] * provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650] * provider/google: Stop setting the id when project creation fails [GH-13644] + * provider/newrelic: newrelic_alert_condition - `condition_scope` must be `application` or `instance` [GH-12972] * provider/openstack: Fix updating Ports [GH-13604] ## 0.9.3 (April 12, 2017) From 193c0d80ad9ccd083b1ce6bc79450f28b914c849 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Sun, 16 Apr 2017 22:35:09 -0700 Subject: [PATCH 170/342] provider/aws: Validate GovCloud KMS ARNs Fixes validation errors in GovCloud when passing a KMS ARN for `kms_key_id` in `s3_bucket_object` and `db_instance`. The region is `us-gov-west-1` which breaks the regexp. Just added the optional `gov-` in the right place. 
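
A standalone sketch of the behavior change: the two patterns below are copied from `validateArn` before and after this commit, and the ARN is the GovCloud KMS ARN added to the validator tests (the `before`/`after` names are just for illustration).

```
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// GovCloud KMS ARN from the new test case.
	arn := "arn:aws-us-gov:kms:us-gov-west-1:123456789012:key/some-uuid-abc123"

	// Old pattern: the region segment allows only a single word between the
	// two-letter prefix and the trailing digit, so "us-gov-west-1" fails.
	before := regexp.MustCompile(`^arn:[\w-]+:([a-zA-Z0-9\-])+:([a-z]{2}-[a-z]+-\d{1})?:(\d{12})?:(.*)$`)

	// New pattern: an optional "gov-" segment accepts GovCloud regions.
	after := regexp.MustCompile(`^arn:[\w-]+:([a-zA-Z0-9\-])+:([a-z]{2}-(gov-)?[a-z]+-\d{1})?:(\d{12})?:(.*)$`)

	fmt.Println(before.MatchString(arn)) // false
	fmt.Println(after.MatchString(arn))  // true
}
```
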
--- builtin/providers/aws/validators.go | 2 +- builtin/providers/aws/validators_test.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/validators.go b/builtin/providers/aws/validators.go index 7b692ad93..dced0935d 100644 --- a/builtin/providers/aws/validators.go +++ b/builtin/providers/aws/validators.go @@ -353,7 +353,7 @@ func validateArn(v interface{}, k string) (ws []string, errors []error) { } // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html - pattern := `^arn:[\w-]+:([a-zA-Z0-9\-])+:([a-z]{2}-[a-z]+-\d{1})?:(\d{12})?:(.*)$` + pattern := `^arn:[\w-]+:([a-zA-Z0-9\-])+:([a-z]{2}-(gov-)?[a-z]+-\d{1})?:(\d{12})?:(.*)$` if !regexp.MustCompile(pattern).MatchString(value) { errors = append(errors, fmt.Errorf( "%q doesn't look like a valid ARN (%q): %q", diff --git a/builtin/providers/aws/validators_test.go b/builtin/providers/aws/validators_test.go index e676ff880..06c225cac 100644 --- a/builtin/providers/aws/validators_test.go +++ b/builtin/providers/aws/validators_test.go @@ -207,6 +207,7 @@ func TestValidateArn(t *testing.T) { "arn:aws:lambda:eu-west-1:319201112229:function:myCustomFunction", // Lambda function "arn:aws:lambda:eu-west-1:319201112229:function:myCustomFunction:Qualifier", // Lambda func qualifier "arn:aws-us-gov:s3:::corp_bucket/object.png", // GovCloud ARN + "arn:aws-us-gov:kms:us-gov-west-1:123456789012:key/some-uuid-abc123", // GovCloud KMS ARN } for _, v := range validNames { _, errors := validateArn(v, "arn") From e8023941ef32db582be30ac758b7b1bbd99a0262 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Sun, 16 Apr 2017 23:31:29 -0700 Subject: [PATCH 171/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9da72124..deac7d31a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ BUG FIXES: * provider/aws: Use mutex & retry for WAF change operations [GH-13656] * provider/aws: Remove aws_vpc_dhcp_options if not found [GH-13610] * provider/aws: Remove aws_network_acl_rule if not found [GH-13608] + * provider/aws: Allow GovCloud KMS ARNs to pass validation in `kms_key_id` attributes [GH-13699] * provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650] * provider/google: Stop setting the id when project creation fails [GH-13644] * provider/newrelic: newrelic_alert_condition - `condition_scope` must be `application` or `instance` [GH-12972] From 137fcfb5de8a426fc74493b05d7a8b033460275b Mon Sep 17 00:00:00 2001 From: Max Riveiro Date: Mon, 17 Apr 2017 13:17:54 +0300 Subject: [PATCH 172/342] Massively add HCL source tag in docs Markdown files Signed-off-by: Max Riveiro --- .../source/docs/commands/index.html.markdown | 4 ++-- .../resource-addressing.html.markdown | 4 +--- .../aws/d/billing_service_account.markdown | 2 +- .../docs/providers/aws/d/eip.html.markdown | 2 +- .../docs/providers/aws/index.html.markdown | 2 +- .../docs/providers/aws/r/alb.html.markdown | 2 +- .../aws/r/alb_listener.html.markdown | 2 +- .../aws/r/alb_listener_rule.html.markdown | 2 +- .../aws/r/alb_target_group.html.markdown | 2 +- .../alb_target_group_attachment.html.markdown | 2 +- .../docs/providers/aws/r/ami.html.markdown | 2 +- .../providers/aws/r/ami_copy.html.markdown | 2 +- .../aws/r/ami_from_instance.html.markdown | 2 +- .../aws/r/ami_launch_permission.html.markdown | 2 +- .../aws/r/api_gateway_account.html.markdown | 2 +- .../aws/r/api_gateway_api_key.html.markdown | 2 +- .../r/api_gateway_authorizer.html.markdown | 2 +- 
...pi_gateway_base_path_mapping.html.markdown | 2 +- ...i_gateway_client_certificate.html.markdown | 2 +- .../r/api_gateway_deployment.html.markdown | 2 +- .../r/api_gateway_domain_name.html.markdown | 2 +- .../r/api_gateway_integration.html.markdown | 4 ++-- ...gateway_integration_response.html.markdown | 2 +- .../aws/r/api_gateway_method.html.markdown | 2 +- .../api_gateway_method_response.html.markdown | 2 +- .../aws/r/api_gateway_model.html.markdown | 2 +- .../aws/r/api_gateway_resource.html.markdown | 2 +- .../aws/r/api_gateway_rest_api.html.markdown | 2 +- .../r/api_gateway_usage_plan.html.markdown | 2 +- .../api_gateway_usage_plan_key.html.markdown | 2 +- ...app_cookie_stickiness_policy.html.markdown | 2 +- .../aws/r/appautoscaling_policy.html.markdown | 5 +++-- .../aws/r/appautoscaling_target.html.markdown | 3 ++- .../r/autoscaling_attachment.html.markdown | 4 ++-- .../aws/r/autoscaling_group.html.markdown | 2 +- .../autoscaling_lifecycle_hooks.html.markdown | 2 +- .../r/autoscaling_notification.html.markdown | 2 +- .../aws/r/autoscaling_policy.html.markdown | 5 +++-- .../aws/r/autoscaling_schedule.html.markdown | 3 ++- .../aws/r/cloudformation_stack.html.markdown | 2 +- .../r/cloudfront_distribution.html.markdown | 2 +- ...front_origin_access_identity.html.markdown | 6 +++--- .../providers/aws/r/cloudtrail.html.markdown | 3 ++- .../aws/r/cloudwatch_event_rule.html.markdown | 2 +- .../r/cloudwatch_event_target.html.markdown | 2 +- .../cloudwatch_log_destination.html.markdown | 2 +- ...watch_log_destination_policy.html.markdown | 2 +- .../aws/r/cloudwatch_log_group.html.markdown | 2 +- ...cloudwatch_log_metric_filter.html.markdown | 2 +- .../aws/r/cloudwatch_log_stream.html.markdown | 2 +- ...atch_log_subscription_filter.html.markdown | 2 +- .../r/cloudwatch_metric_alarm.html.markdown | 7 ++++--- .../r/code_commit_repository.html.markdown | 2 +- .../aws/r/code_commit_trigger.html.markdown | 2 +- .../aws/r/codebuild_project.html.markdown | 2 +- .../aws/r/codedeploy_app.html.markdown | 2 +- ...codedeploy_deployment_config.html.markdown | 2 +- .../codedeploy_deployment_group.html.markdown | 2 +- .../providers/aws/r/codepipeline.markdown | 2 +- .../aws/r/config_config_rule.html.markdown | 2 +- ...onfig_configuration_recorder.html.markdown | 2 +- ...onfiguration_recorder_status.html.markdown | 2 +- .../r/config_delivery_channel.html.markdown | 2 +- .../aws/r/customer_gateway.html.markdown | 2 +- .../aws/r/db_event_subscription.html.markdown | 2 +- .../providers/aws/r/db_instance.html.markdown | 2 +- .../aws/r/db_option_group.html.markdown | 2 +- .../aws/r/db_parameter_group.html.markdown | 2 +- .../aws/r/db_security_group.html.markdown | 2 +- .../aws/r/db_subnet_group.html.markdown | 2 +- .../aws/r/default_network_acl.html.markdown | 6 +++--- .../aws/r/default_route_table.html.markdown | 2 +- .../r/default_security_group.html.markdown | 4 ++-- .../directory_service_directory.html.markdown | 2 +- .../aws/r/dms_certificate.html.markdown | 2 +- .../aws/r/dms_endpoint.html.markdown | 2 +- .../r/dms_replication_instance.html.markdown | 2 +- ...dms_replication_subnet_group.html.markdown | 2 +- .../aws/r/dms_replication_task.html.markdown | 2 +- .../aws/r/dynamodb_table.html.markdown | 2 +- .../docs/providers/aws/r/ebs_snapshot.html.md | 2 +- .../docs/providers/aws/r/ebs_volume.html.md | 2 +- .../aws/r/ecr_repository.html.markdown | 2 +- .../aws/r/ecr_repository_policy.html.markdown | 2 +- .../providers/aws/r/ecs_cluster.html.markdown | 2 +- .../providers/aws/r/ecs_service.html.markdown | 2 +- 
.../aws/r/ecs_task_definition.html.markdown | 2 +- .../aws/r/efs_file_system.html.markdown | 2 +- .../aws/r/efs_mount_target.html.markdown | 2 +- ...egress_only_internet_gateway.html.markdown | 2 +- .../docs/providers/aws/r/eip.html.markdown | 6 +++--- .../aws/r/eip_association.html.markdown | 2 +- ...lastic_beanstalk_application.html.markdown | 2 +- ...eanstalk_application_version.html.markdown | 2 +- ...stalk_configuration_template.html.markdown | 3 +-- ...lastic_beanstalk_environment.html.markdown | 5 ++--- .../elastic_transcoder_pipeline.html.markdown | 2 +- .../r/elastic_transcoder_preset.html.markdown | 2 +- .../aws/r/elasticache_cluster.html.markdown | 2 +- .../elasticache_parameter_group.html.markdown | 2 +- ...lasticache_replication_group.html.markdown | 2 +- .../elasticache_security_group.html.markdown | 2 +- .../r/elasticache_subnet_group.html.markdown | 2 +- .../aws/r/elasticsearch_domain.html.markdown | 2 +- .../elasticsearch_domain_policy.html.markdown | 2 +- .../docs/providers/aws/r/elb.html.markdown | 2 +- .../aws/r/elb_attachment.html.markdown | 2 +- .../docs/providers/aws/r/emr_cluster.html.md | 2 +- .../aws/r/emr_instance_group.html.md | 2 +- .../aws/r/glacier_vault.html.markdown | 2 +- .../aws/r/iam_access_key.html.markdown | 2 +- .../aws/r/iam_account_alias.html.markdown | 2 +- .../iam_account_password_policy.html.markdown | 2 +- .../providers/aws/r/iam_group.html.markdown | 2 +- .../aws/r/iam_group_membership.html.markdown | 2 +- .../aws/r/iam_group_policy.html.markdown | 2 +- .../r/iam_group_policy_attachment.markdown | 2 +- .../aws/r/iam_instance_profile.html.markdown | 2 +- .../iam_openid_connect_provider.html.markdown | 2 +- .../providers/aws/r/iam_policy.html.markdown | 2 +- .../aws/r/iam_policy_attachment.html.markdown | 2 +- .../providers/aws/r/iam_role.html.markdown | 4 ++-- .../aws/r/iam_role_policy.html.markdown | 2 +- .../aws/r/iam_role_policy_attachment.markdown | 2 +- .../aws/r/iam_saml_provider.html.markdown | 2 +- .../r/iam_server_certificate.html.markdown | 7 +++---- .../providers/aws/r/iam_user.html.markdown | 2 +- .../r/iam_user_login_profile.html.markdown | 2 +- .../aws/r/iam_user_policy.html.markdown | 2 +- .../aws/r/iam_user_policy_attachment.markdown | 2 +- .../aws/r/iam_user_ssh_key.html.markdown | 2 +- .../inspector_assessment_target.html.markdown | 2 +- ...nspector_assessment_template.html.markdown | 2 +- .../r/inspector_resource_group.html.markdown | 2 +- .../providers/aws/r/instance.html.markdown | 2 +- .../aws/r/internet_gateway.html.markdown | 2 +- .../providers/aws/r/key_pair.html.markdown | 2 +- ...sis_firehose_delivery_stream.html.markdown | 9 ++++----- .../aws/r/kinesis_stream.html.markdown | 2 +- .../providers/aws/r/kms_alias.html.markdown | 2 +- .../providers/aws/r/kms_key.html.markdown | 2 +- .../aws/r/lambda_alias.html.markdown | 2 +- .../lambda_event_source_mapping.html.markdown | 2 +- .../aws/r/lambda_function.html.markdown | 2 +- .../aws/r/lambda_permission.html.markdown | 4 ++-- .../aws/r/launch_configuration.html.markdown | 6 +++--- .../lb_cookie_stickiness_policy.html.markdown | 2 +- .../r/lb_ssl_negotiation_policy.html.markdown | 2 +- .../aws/r/lightsail_domain.html.markdown | 2 +- .../aws/r/lightsail_instance.html.markdown | 2 +- .../aws/r/lightsail_key_pair.html.markdown | 6 +++--- .../aws/r/lightsail_static_ip.html.markdown | 2 +- ...ghtsail_static_ip_attachment.html.markdown | 2 +- ...lancer_backend_server_policy.html.markdown | 4 ++-- ...oad_balancer_listener_policy.html.markdown | 2 +- 
.../aws/r/load_balancer_policy.html.markdown | 4 ++-- .../r/main_route_table_assoc.html.markdown | 2 +- .../providers/aws/r/nat_gateway.html.markdown | 2 +- .../providers/aws/r/network_acl.html.markdown | 2 +- .../aws/r/network_acl_rule.html.markdown | 2 +- .../aws/r/network_interface.markdown | 2 +- .../aws/r/opsworks_application.html.markdown | 2 +- .../aws/r/opsworks_custom_layer.html.markdown | 3 +-- .../r/opsworks_ganglia_layer.html.markdown | 2 +- .../r/opsworks_haproxy_layer.html.markdown | 2 +- .../aws/r/opsworks_instance.html.markdown | 2 +- .../r/opsworks_java_app_layer.html.markdown | 2 +- .../r/opsworks_memcached_layer.html.markdown | 2 +- .../aws/r/opsworks_mysql_layer.html.markdown | 2 +- .../r/opsworks_nodejs_app_layer.html.markdown | 2 +- .../aws/r/opsworks_permission.html.markdown | 2 +- .../r/opsworks_php_app_layer.html.markdown | 2 +- .../r/opsworks_rails_app_layer.html.markdown | 2 +- .../r/opsworks_rds_db_instance.html.markdown | 2 +- .../aws/r/opsworks_stack.html.markdown | 3 +-- .../r/opsworks_static_web_layer.html.markdown | 2 +- .../aws/r/opsworks_user_profile.html.markdown | 2 +- .../aws/r/placement_group.html.markdown | 2 +- .../aws/r/proxy_protocol_policy.html.markdown | 2 +- .../providers/aws/r/rds_cluster.html.markdown | 3 +-- .../aws/r/rds_cluster_instance.html.markdown | 2 +- .../r/rds_cluster_parameter_group.markdown | 2 +- .../aws/r/redshift_cluster.html.markdown | 2 +- .../r/redshift_parameter_group.html.markdown | 2 +- .../r/redshift_security_group.html.markdown | 2 +- .../aws/r/redshift_subnet_group.html.markdown | 2 +- .../docs/providers/aws/r/route.html.markdown | 4 ++-- .../r/route53_delegation_set.html.markdown | 2 +- .../aws/r/route53_health_check.html.markdown | 4 ++-- .../aws/r/route53_record.html.markdown | 6 +++--- .../aws/r/route53_zone.html.markdown | 4 ++-- .../r/route53_zone_association.html.markdown | 2 +- .../providers/aws/r/route_table.html.markdown | 2 +- .../r/route_table_association.html.markdown | 2 +- .../providers/aws/r/s3_bucket.html.markdown | 14 ++++++------- .../r/s3_bucket_notification.html.markdown | 12 +++++------ .../aws/r/s3_bucket_object.html.markdown | 6 +++--- .../aws/r/s3_bucket_policy.html.markdown | 2 +- .../aws/r/security_group.html.markdown | 2 +- .../aws/r/security_group_rule.html.markdown | 4 ++-- .../ses_active_receipt_rule_set.html.markdown | 2 +- .../aws/r/ses_configuration_set.markdown | 2 +- .../aws/r/ses_domain_identity.html.markdown | 2 +- .../aws/r/ses_event_destination.markdown | 2 +- .../aws/r/ses_receipt_filter.html.markdown | 2 +- .../aws/r/ses_receipt_rule.html.markdown | 2 +- .../aws/r/ses_receipt_rule_set.html.markdown | 2 +- .../aws/r/sfn_activity.html.markdown | 2 +- .../aws/r/sfn_state_machine.html.markdown | 2 +- .../aws/r/simpledb_domain.html.markdown | 2 +- ...hot_create_volume_permission.html.markdown | 2 +- .../providers/aws/r/sns_topic.html.markdown | 2 +- .../aws/r/sns_topic_policy.html.markdown | 2 +- .../r/sns_topic_subscription.html.markdown | 9 ++++----- .../spot_datafeed_subscription.html.markdown | 3 +-- .../aws/r/spot_fleet_request.html.markdown | 5 ++--- .../aws/r/spot_instance_request.html.markdown | 2 +- .../providers/aws/r/sqs_queue.html.markdown | 4 ++-- .../aws/r/sqs_queue_policy.html.markdown | 2 +- .../aws/r/ssm_activation.html.markdown | 2 +- .../aws/r/ssm_association.html.markdown | 2 +- .../aws/r/ssm_document.html.markdown | 2 +- .../docs/providers/aws/r/subnet.html.markdown | 4 +--- .../aws/r/volume_attachment.html.markdown | 2 +- 
.../docs/providers/aws/r/vpc.html.markdown | 4 ++-- .../aws/r/vpc_dhcp_options.html.markdown | 4 ++-- ...vpc_dhcp_options_association.html.markdown | 2 +- .../aws/r/vpc_endpoint.html.markdown | 3 +-- ...oint_route_table_association.html.markdown | 2 +- .../providers/aws/r/vpc_peering.html.markdown | 6 +++--- .../aws/r/vpc_peering_accepter.html.markdown | 2 +- .../aws/r/vpn_connection.html.markdown | 2 +- .../aws/r/vpn_connection_route.html.markdown | 2 +- .../providers/aws/r/vpn_gateway.html.markdown | 2 +- .../r/vpn_gateway_attachment.html.markdown | 2 +- .../aws/r/waf_byte_match_set.html.markdown | 2 +- .../providers/aws/r/waf_ipset.html.markdown | 2 +- .../providers/aws/r/waf_rule.html.markdown | 2 +- .../r/waf_size_constraint_set.html.markdown | 2 +- .../waf_sql_injection_match_set.html.markdown | 2 +- .../providers/aws/r/waf_web_acl.html.markdown | 2 +- .../aws/r/waf_xss_match_set.html.markdown | 2 +- .../docs/providers/azure/index.html.markdown | 2 +- .../azure/r/affinity_group.html.markdown | 2 +- .../providers/azure/r/data_disk.html.markdown | 2 +- .../azure/r/dns_server.html.markdown | 2 +- .../azure/r/hosted_service.html.markdown | 2 +- .../providers/azure/r/instance.html.markdown | 2 +- .../r/local_network_connection.html.markdown | 2 +- .../azure/r/security_group.html.markdown | 2 +- .../azure/r/security_group_rule.html.markdown | 2 +- .../azure/r/sql_database_server.html.markdown | 2 +- ...atabase_server_firewall_rule.html.markdown | 2 +- .../r/sql_database_service.html.markdown | 2 +- .../azure/r/storage_blob.html.markdown | 4 ++-- .../azure/r/storage_container.html.markdown | 4 ++-- .../azure/r/storage_queue.html.markdown | 4 ++-- .../azure/r/storage_service.html.markdown | 4 ++-- .../azure/r/virtual_network.html.markdown | 2 +- .../azurerm/d/client_config.html.markdown | 2 +- .../providers/azurerm/index.html.markdown | 2 +- .../azurerm/r/availability_set.html.markdown | 2 +- .../azurerm/r/cdn_endpoint.html.markdown | 2 +- .../azurerm/r/cdn_profile.html.markdown | 4 ++-- .../r/container_registry.html.markdown | 2 +- .../azurerm/r/container_service.html.markdown | 8 +++++--- .../azurerm/r/dns_a_record.html.markdown | 3 ++- .../azurerm/r/dns_aaaa_record.html.markdown | 3 ++- .../azurerm/r/dns_cname_record.html.markdown | 3 ++- .../azurerm/r/dns_mx_record.html.markdown | 2 +- .../azurerm/r/dns_ns_record.html.markdown | 2 +- .../azurerm/r/dns_srv_record.html.markdown | 2 +- .../azurerm/r/dns_txt_record.html.markdown | 2 +- .../azurerm/r/dns_zone.html.markdown | 2 +- .../azurerm/r/eventhub.html.markdown | 2 +- .../eventhub_authorization_rule.html.markdown | 3 +-- .../r/eventhub_consumer_group.html.markdown | 2 +- .../r/eventhub_namespace.html.markdown | 2 +- .../azurerm/r/key_vault.html.markdown | 2 +- .../azurerm/r/loadbalancer.html.markdown | 2 +- ...alancer_backend_address_pool.html.markdown | 2 +- .../r/loadbalancer_nat_pool.html.markdown | 2 +- .../r/loadbalancer_nat_rule.html.markdown | 2 +- .../r/loadbalancer_probe.html.markdown | 2 +- .../azurerm/r/loadbalancer_rule.html.markdown | 2 +- .../r/local_network_gateway.html.markdown | 2 +- .../azurerm/r/managed_disk.html.markdown | 4 ++-- .../azurerm/r/network_interface.html.markdown | 2 +- .../r/network_security_group.html.markdown | 2 +- .../r/network_security_rule.html.markdown | 2 +- .../azurerm/r/public_ip.html.markdown | 2 +- .../azurerm/r/redis_cache.html.markdown | 10 ++++++---- .../azurerm/r/resource_group.html.markdown | 2 +- .../providers/azurerm/r/route.html.markdown | 2 +- .../azurerm/r/route_table.html.markdown | 
2 +- .../azurerm/r/search_service.html.markdown | 2 +- .../r/servicebus_namespace.html.markdown | 2 +- .../r/servicebus_subscription.html.markdown | 2 +- .../azurerm/r/servicebus_topic.html.markdown | 2 +- .../azurerm/r/sql_database.html.markdown | 2 +- .../azurerm/r/sql_firewall_rule.html.markdown | 2 +- .../azurerm/r/sql_server.html.markdown | 2 +- .../azurerm/r/storage_account.html.markdown | 2 +- .../azurerm/r/storage_blob.html.markdown | 2 +- .../azurerm/r/storage_container.html.markdown | 2 +- .../azurerm/r/storage_queue.html.markdown | 2 +- .../azurerm/r/storage_share.html.markdown | 2 +- .../azurerm/r/storage_table.html.markdown | 2 +- .../providers/azurerm/r/subnet.html.markdown | 2 +- .../r/template_deployment.html.markdown | 2 +- .../r/traffic_manager_endpoint.html.markdown | 2 +- .../r/traffic_manager_profile.html.markdown | 2 +- .../azurerm/r/virtual_machine.html.markdown | 8 ++++---- .../r/virtual_machine_extension.html.markdown | 2 +- .../virtual_machine_scale_sets.html.markdown | 2 +- .../azurerm/r/virtual_network.html.markdown | 2 +- .../r/virtual_network_peering.html.markdown | 2 +- .../providers/bitbucket/index.html.markdown | 2 +- .../r/default_reviewers.html.markdown | 2 +- .../providers/bitbucket/r/hook.html.markdown | 2 +- .../bitbucket/r/repository.html.markdown | 2 +- .../docs/providers/do/index.html.markdown | 2 +- .../docs/providers/do/r/domain.html.markdown | 2 +- .../docs/providers/do/r/droplet.html.markdown | 2 +- .../providers/do/r/floating_ip.html.markdown | 2 +- .../providers/do/r/loadbalancer.html.markdown | 2 +- .../docs/providers/do/r/record.html.markdown | 2 +- .../docs/providers/do/r/ssh_key.html.markdown | 2 +- .../docs/providers/do/r/tag.html.markdown | 2 +- .../docs/providers/do/r/volume.markdown | 2 +- .../docker/d/registry_image.html.markdown | 2 +- .../docs/providers/docker/index.html.markdown | 2 +- .../docker/r/container.html.markdown | 4 ++-- .../providers/docker/r/image.html.markdown | 4 ++-- .../providers/docker/r/network.html.markdown | 2 +- .../providers/docker/r/volume.html.markdown | 2 +- .../docs/providers/dyn/index.html.markdown | 2 +- .../docs/providers/dyn/r/record.html.markdown | 2 +- .../docs/providers/google/index.html.markdown | 2 +- .../google/r/compute_address.html.markdown | 2 +- .../google/r/compute_autoscaler.html.markdown | 2 +- .../r/compute_backend_service.html.markdown | 2 +- .../google/r/compute_disk.html.markdown | 2 +- .../google/r/compute_firewall.html.markdown | 2 +- .../r/compute_global_address.html.markdown | 2 +- ...mpute_global_forwarding_rule.html.markdown | 2 +- .../r/compute_http_health_check.html.markdown | 2 +- .../compute_https_health_check.html.markdown | 2 +- .../google/r/compute_image.html.markdown | 2 +- .../google/r/compute_instance.html.markdown | 2 +- .../r/compute_instance_group.html.markdown | 6 ++++-- ...mpute_instance_group_manager.html.markdown | 2 +- .../r/compute_instance_template.html.markdown | 4 ++-- .../google/r/compute_network.html.markdown | 2 +- .../r/compute_project_metadata.html.markdown | 2 +- .../google/r/compute_route.html.markdown | 2 +- .../r/compute_ssl_certificate.html.markdown | 2 +- .../google/r/compute_subnetwork.html.markdown | 2 +- .../r/compute_target_http_proxy.html.markdown | 2 +- .../compute_target_https_proxy.html.markdown | 2 +- .../r/compute_target_pool.html.markdown | 2 +- .../google/r/compute_url_map.html.markdown | 2 +- .../r/compute_vpn_gateway.html.markdown | 2 +- .../google/r/compute_vpn_tunnel.html.markdown | 2 +- .../google/r/container_cluster.html.markdown 
| 2 +- .../r/container_node_pool.html.markdown | 2 +- .../google/r/dns_managed_zone.markdown | 2 +- .../google/r/dns_record_set.markdown | 2 +- .../google/r/google_project.html.markdown | 2 +- .../r/google_project_iam_policy.html.markdown | 2 +- .../r/google_project_services.html.markdown | 2 +- .../r/google_service_account.html.markdown | 2 +- .../r/pubsub_subscription.html.markdown | 2 +- .../google/r/pubsub_topic.html.markdown | 2 +- .../google/r/sql_database.html.markdown | 2 +- .../r/sql_database_instance.html.markdown | 2 +- .../providers/google/r/sql_user.html.markdown | 2 +- .../google/r/storage_bucket.html.markdown | 2 +- .../google/r/storage_bucket_acl.html.markdown | 2 +- .../r/storage_bucket_object.html.markdown | 2 +- .../google/r/storage_object_acl.html.markdown | 2 +- .../providers/grafana/index.html.markdown | 2 +- .../providers/grafana/r/dashboard.html.md | 4 ++-- .../providers/grafana/r/data_source.html.md | 2 +- .../docs/providers/heroku/index.html.markdown | 2 +- .../providers/heroku/r/addon.html.markdown | 2 +- .../docs/providers/heroku/r/app.html.markdown | 2 +- .../providers/heroku/r/cert.html.markdown | 2 +- .../providers/heroku/r/domain.html.markdown | 2 +- .../providers/heroku/r/drain.html.markdown | 2 +- .../providers/icinga2/index.html.markdown | 8 ++++---- .../icinga2/r/checkcommands.html.markdown | 2 +- .../providers/icinga2/r/host.html.markdown | 2 +- .../icinga2/r/hostgroup.html.markdown | 2 +- .../providers/icinga2/r/service.html.markdown | 2 +- .../docs/providers/ignition/d/config.html.md | 2 +- .../docs/providers/ignition/d/disk.html.md | 2 +- .../docs/providers/ignition/d/file.html.md | 5 ++--- .../providers/ignition/d/filesystem.html.md | 2 +- .../docs/providers/ignition/d/group.html.md | 2 +- .../ignition/d/networkd_unit.html.md | 2 +- .../docs/providers/ignition/d/raid.html.md | 2 +- .../providers/ignition/d/systemd_unit.html.md | 2 +- .../docs/providers/ignition/d/user.html.md | 2 +- .../providers/ignition/index.html.markdown | 2 +- .../providers/influxdb/index.html.markdown | 2 +- .../influxdb/r/continuous_query.html.md | 2 +- .../providers/influxdb/r/database.html.md | 2 +- .../docs/providers/influxdb/r/user.html.md | 2 +- .../providers/librato/index.html.markdown | 2 +- .../providers/librato/r/alert.html.markdown | 2 +- .../providers/librato/r/service.html.markdown | 2 +- .../providers/librato/r/space.html.markdown | 2 +- .../librato/r/space_chart.html.markdown | 2 +- .../providers/logentries/index.html.markdown | 2 +- .../providers/logentries/r/log.html.markdown | 2 +- .../logentries/r/logset.html.markdown | 2 +- .../providers/mailgun/index.html.markdown | 2 +- .../providers/mailgun/r/domain.html.markdown | 2 +- .../newrelic/d/application.html.markdown | 2 +- .../providers/newrelic/index.html.markdown | 2 +- .../newrelic/r/alert_channel.html.markdown | 2 +- .../newrelic/r/alert_condition.html.markdown | 2 +- .../newrelic/r/alert_policy.html.markdown | 2 +- .../r/alert_policy_channel.html.markdown | 2 +- .../docs/providers/ns1/index.html.markdown | 2 +- .../docs/providers/ns1/r/apikey.html.markdown | 2 +- .../providers/ns1/r/datafeed.html.markdown | 2 +- .../providers/ns1/r/datasource.html.markdown | 2 +- .../ns1/r/monitoringjob.html.markdown | 2 +- .../providers/ns1/r/notifylist.html.markdown | 2 +- .../docs/providers/ns1/r/record.html.markdown | 2 +- .../docs/providers/ns1/r/team.html.markdown | 2 +- .../docs/providers/ns1/r/user.html.markdown | 2 +- .../docs/providers/ns1/r/zone.html.markdown | 2 +- .../openstack/d/images_image_v2.html.markdown 
| 2 +- .../d/networking_network_v2.html.markdown | 2 +- .../providers/openstack/index.html.markdown | 2 +- ...lockstorage_volume_attach_v2.html.markdown | 2 +- .../r/blockstorage_volume_v1.html.markdown | 2 +- .../r/blockstorage_volume_v2.html.markdown | 2 +- ...pute_floatingip_associate_v2.html.markdown | 4 ++-- .../r/compute_floatingip_v2.html.markdown | 2 +- .../r/compute_instance_v2.html.markdown | 20 +++++++++---------- .../r/compute_keypair_v2.html.markdown | 2 +- .../r/compute_secgroup_v2.html.markdown | 6 +++--- .../r/compute_servergroup_v2.html.markdown | 2 +- .../r/compute_volume_attach_v2.html.markdown | 2 +- .../openstack/r/fw_firewall_v1.html.markdown | 2 +- .../openstack/r/fw_policy_v1.html.markdown | 2 +- .../openstack/r/fw_rule_v1.html.markdown | 2 +- .../openstack/r/images_image_v2.html.markdown | 2 +- .../openstack/r/lb_listener_v2.html.markdown | 2 +- .../r/lb_loadbalancer_v2.html.markdown | 2 +- .../openstack/r/lb_member_v1.html.markdown | 2 +- .../openstack/r/lb_member_v2.html.markdown | 2 +- .../openstack/r/lb_monitor_v1.html.markdown | 2 +- .../openstack/r/lb_monitor_v2.html.markdown | 2 +- .../openstack/r/lb_pool_v1.html.markdown | 2 +- .../openstack/r/lb_pool_v2.html.markdown | 2 +- .../openstack/r/lb_vip_v1.html.markdown | 2 +- .../r/networking_floatingip_v2.html.markdown | 2 +- .../r/networking_network_v2.html.markdown | 2 +- .../r/networking_port_v2.html.markdown | 2 +- ...tworking_router_interface_v2.html.markdown | 2 +- .../networking_router_route_v2.html.markdown | 2 +- .../r/networking_router_v2.html.markdown | 2 +- .../networking_secgroup_rule_v2.html.markdown | 2 +- .../r/networking_secgroup_v2.html.markdown | 4 ++-- .../r/networking_subnet_v2.html.markdown | 2 +- .../objectstorage_container_v1.html.markdown | 2 +- .../providers/opsgenie/d/user.html.markdown | 2 +- .../providers/opsgenie/index.html.markdown | 2 +- .../providers/opsgenie/r/team.html.markdown | 2 +- .../providers/opsgenie/r/user.html.markdown | 2 +- .../docs/providers/packet/index.html.markdown | 2 +- .../providers/packet/r/device.html.markdown | 2 +- .../providers/packet/r/project.html.markdown | 2 +- .../providers/packet/r/ssh_key.html.markdown | 2 +- .../providers/packet/r/volume.html.markdown | 2 +- .../d/escalation_policy.html.markdown | 2 +- .../pagerduty/d/schedule.html.markdown | 2 +- .../providers/pagerduty/d/user.html.markdown | 2 +- .../pagerduty/d/vendor.html.markdown | 2 +- .../providers/pagerduty/index.html.markdown | 2 +- .../providers/pagerduty/r/addon.html.markdown | 2 +- .../r/escalation_policy.html.markdown | 2 +- .../pagerduty/r/schedule.html.markdown | 2 +- .../pagerduty/r/service.html.markdown | 4 ++-- .../r/service_integration.html.markdown | 2 +- .../providers/pagerduty/r/team.html.markdown | 2 +- .../providers/pagerduty/r/user.html.markdown | 2 +- .../providers/postgresql/index.html.markdown | 4 ++-- .../r/postgresql_database.html.markdown | 4 ++-- .../r/postgresql_extension.html.markdown | 2 +- .../r/postgresql_role.html.markdown | 4 ++-- .../r/postgresql_schema.html.markdown | 4 ++-- .../providers/powerdns/index.html.markdown | 2 +- .../providers/powerdns/r/record.html.markdown | 4 ++-- .../d/profitbricks_datacenter.html.markdown | 2 +- .../d/profitbricks_image.html.markdown | 2 +- .../d/profitbricks_location.html.markdown | 2 +- .../profitbricks/index.html.markdown | 4 ++-- .../r/profitbricks_datacenter.html.markdown | 2 +- .../r/profitbricks_firewall.html.markdown | 2 +- .../r/profitbricks_ipblock.html.markdown | 2 +- .../r/profitbricks_lan.html.markdown | 2 +- 
.../r/profitbricks_loadbalancer.html.markdown | 2 +- .../r/profitbricks_nic.html.markdown | 2 +- .../r/profitbricks_server.html.markdown | 2 +- .../r/profitbricks_volume.html.markdown | 2 +- .../providers/rabbitmq/index.html.markdown | 2 +- .../rabbitmq/r/binding.html.markdown | 4 ++-- .../rabbitmq/r/exchange.html.markdown | 2 +- .../rabbitmq/r/permissions.html.markdown | 2 +- .../providers/rabbitmq/r/policy.html.markdown | 2 +- .../providers/rabbitmq/r/queue.html.markdown | 2 +- .../providers/rabbitmq/r/user.html.markdown | 2 +- .../providers/rabbitmq/r/vhost.html.markdown | 2 +- .../scaleway/d/bootscript.html.markdown | 2 +- .../providers/scaleway/d/image.html.markdown | 2 +- .../providers/scaleway/r/ip.html.markdown | 2 +- .../scaleway/r/security_group.html.markdown | 2 +- .../r/security_group_rule.html.markdown | 2 +- .../providers/scaleway/r/server.html.markdown | 2 +- .../providers/scaleway/r/volume.html.markdown | 2 +- .../r/volume_attachment.html.markdown | 2 +- .../softlayer/r/ssh_key.html.markdown | 2 +- .../providers/spotinst/index.html.markdown | 2 +- .../spotinst/r/aws_group.html.markdown | 2 +- .../providers/statuscake/index.html.markdown | 2 +- .../providers/statuscake/r/test.html.markdown | 2 +- .../docs/providers/triton/index.html.markdown | 2 +- .../triton/r/triton_fabric.html.markdown | 11 +--------- .../r/triton_firewall_rule.html.markdown | 13 +++--------- .../triton/r/triton_key.html.markdown | 4 +--- .../triton/r/triton_machine.html.markdown | 7 ++----- .../triton/r/triton_vlan.html.markdown | 3 +-- .../providers/ultradns/index.html.markdown | 2 +- .../ultradns/r/dirpool.html.markdown | 3 ++- .../ultradns/r/probe_http.html.markdown | 3 ++- .../ultradns/r/probe_ping.html.markdown | 3 ++- .../providers/ultradns/r/record.html.markdown | 2 +- .../providers/ultradns/r/tcpool.html.markdown | 3 ++- .../docs/providers/vcd/index.html.markdown | 2 +- .../docs/providers/vcd/r/dnat.html.markdown | 2 +- .../vcd/r/firewall_rules.html.markdown | 2 +- .../providers/vcd/r/network.html.markdown | 2 +- .../docs/providers/vcd/r/snat.html.markdown | 2 +- .../docs/providers/vcd/r/vapp.html.markdown | 2 +- .../providers/vsphere/index.html.markdown | 2 +- .../providers/vsphere/r/file.html.markdown | 4 ++-- .../providers/vsphere/r/folder.html.markdown | 2 +- .../vsphere/r/virtual_disk.html.markdown | 2 +- .../vsphere/r/virtual_machine.html.markdown | 4 ++-- 555 files changed, 679 insertions(+), 699 deletions(-) diff --git a/website/source/docs/commands/index.html.markdown b/website/source/docs/commands/index.html.markdown index 7d95166db..faff71bff 100644 --- a/website/source/docs/commands/index.html.markdown +++ b/website/source/docs/commands/index.html.markdown @@ -19,7 +19,7 @@ most likely expect. To view a list of the available commands at any time, just run terraform with no arguments: -``` +```text $ terraform Usage: terraform [--version] [--help] [args] @@ -58,7 +58,7 @@ All other commands: To get help for any specific command, pass the -h flag to the relevant subcommand. 
For example, to see help about the graph subcommand: -``` +```text $ terraform graph -h Usage: terraform graph [options] PATH diff --git a/website/source/docs/internals/resource-addressing.html.markdown b/website/source/docs/internals/resource-addressing.html.markdown index d208475c0..a6925382a 100644 --- a/website/source/docs/internals/resource-addressing.html.markdown +++ b/website/source/docs/internals/resource-addressing.html.markdown @@ -48,7 +48,7 @@ resource_type.resource_name[N] Given a Terraform config that includes: -``` +```hcl resource "aws_instance" "web" { # ... count = 4 @@ -57,7 +57,6 @@ resource "aws_instance" "web" { An address like this: - ``` aws_instance.web[3] ``` @@ -68,5 +67,4 @@ Refers to only the last instance in the config, and an address like this: aws_instance.web ``` - Refers to all four "web" instances. diff --git a/website/source/docs/providers/aws/d/billing_service_account.markdown b/website/source/docs/providers/aws/d/billing_service_account.markdown index 2771f6c92..aa6da6550 100644 --- a/website/source/docs/providers/aws/d/billing_service_account.markdown +++ b/website/source/docs/providers/aws/d/billing_service_account.markdown @@ -12,7 +12,7 @@ Use this data source to get the Account ID of the [AWS Billing and Cost Manageme ## Example Usage -``` +```hcl data "aws_billing_service_account" "main" {} resource "aws_s3_bucket" "billing_logs" { diff --git a/website/source/docs/providers/aws/d/eip.html.markdown b/website/source/docs/providers/aws/d/eip.html.markdown index e84669258..a9d5494de 100644 --- a/website/source/docs/providers/aws/d/eip.html.markdown +++ b/website/source/docs/providers/aws/d/eip.html.markdown @@ -18,7 +18,7 @@ public IP as an input variable and needs to determine the other. The following example shows how one might accept a public IP as a variable and use this data source to obtain the allocation ID. -``` +```hcl variable "instance_id" {} variable "public_ip" {} diff --git a/website/source/docs/providers/aws/index.html.markdown b/website/source/docs/providers/aws/index.html.markdown index 958888c92..429039d5a 100644 --- a/website/source/docs/providers/aws/index.html.markdown +++ b/website/source/docs/providers/aws/index.html.markdown @@ -48,7 +48,7 @@ AWS provider block: Usage: -``` +```hcl provider "aws" { region = "us-west-2" access_key = "anaccesskey" diff --git a/website/source/docs/providers/aws/r/alb.html.markdown b/website/source/docs/providers/aws/r/alb.html.markdown index 62a446c8f..7ac9c943c 100644 --- a/website/source/docs/providers/aws/r/alb.html.markdown +++ b/website/source/docs/providers/aws/r/alb.html.markdown @@ -16,7 +16,7 @@ thing. ## Example Usage -``` +```hcl # Create a new load balancer resource "aws_alb" "test" { name = "test-alb-tf" diff --git a/website/source/docs/providers/aws/r/alb_listener.html.markdown b/website/source/docs/providers/aws/r/alb_listener.html.markdown index 479393c77..53e75d795 100644 --- a/website/source/docs/providers/aws/r/alb_listener.html.markdown +++ b/website/source/docs/providers/aws/r/alb_listener.html.markdown @@ -12,7 +12,7 @@ Provides an Application Load Balancer Listener resource. ## Example Usage -``` +```hcl # Create a new load balancer resource "aws_alb" "front_end" { # ... 
diff --git a/website/source/docs/providers/aws/r/alb_listener_rule.html.markdown b/website/source/docs/providers/aws/r/alb_listener_rule.html.markdown index a868bed44..50de244d0 100644 --- a/website/source/docs/providers/aws/r/alb_listener_rule.html.markdown +++ b/website/source/docs/providers/aws/r/alb_listener_rule.html.markdown @@ -12,7 +12,7 @@ Provides an Application Load Balancer Listener Rule resource. ## Example Usage -``` +```hcl # Create a new load balancer resource "aws_alb" "front_end" { # ... diff --git a/website/source/docs/providers/aws/r/alb_target_group.html.markdown b/website/source/docs/providers/aws/r/alb_target_group.html.markdown index afaa460ba..c554a06d5 100644 --- a/website/source/docs/providers/aws/r/alb_target_group.html.markdown +++ b/website/source/docs/providers/aws/r/alb_target_group.html.markdown @@ -14,7 +14,7 @@ resources. ## Example Usage -``` +```hcl resource "aws_alb_target_group" "test" { name = "tf-example-alb-tg" port = 80 diff --git a/website/source/docs/providers/aws/r/alb_target_group_attachment.html.markdown b/website/source/docs/providers/aws/r/alb_target_group_attachment.html.markdown index 547f4c991..a0cac7241 100644 --- a/website/source/docs/providers/aws/r/alb_target_group_attachment.html.markdown +++ b/website/source/docs/providers/aws/r/alb_target_group_attachment.html.markdown @@ -14,7 +14,7 @@ target group ## Example Usage -``` +```hcl resource "aws_alb_target_group_attachment" "test" { target_group_arn = "${aws_alb_target_group.test.arn}" target_id = "${aws_instance.test.id}" diff --git a/website/source/docs/providers/aws/r/ami.html.markdown b/website/source/docs/providers/aws/r/ami.html.markdown index b275289d2..7c1a64298 100644 --- a/website/source/docs/providers/aws/r/ami.html.markdown +++ b/website/source/docs/providers/aws/r/ami.html.markdown @@ -19,7 +19,7 @@ it's better to use `aws_ami_launch_permission` instead. ## Example Usage -``` +```hcl # Create an AMI that will start a machine whose root device is backed by # an EBS volume populated from a snapshot. It is assumed that such a snapshot # already exists with the id "snap-xxxxxxxx". diff --git a/website/source/docs/providers/aws/r/ami_copy.html.markdown b/website/source/docs/providers/aws/r/ami_copy.html.markdown index e5a31015d..67db233f0 100644 --- a/website/source/docs/providers/aws/r/ami_copy.html.markdown +++ b/website/source/docs/providers/aws/r/ami_copy.html.markdown @@ -22,7 +22,7 @@ block until the new AMI is available for use on new instances. ## Example Usage -``` +```hcl resource "aws_ami_copy" "example" { name = "terraform-example" description = "A copy of ami-xxxxxxxx" diff --git a/website/source/docs/providers/aws/r/ami_from_instance.html.markdown b/website/source/docs/providers/aws/r/ami_from_instance.html.markdown index 6dd088dcb..806cc3b3e 100644 --- a/website/source/docs/providers/aws/r/ami_from_instance.html.markdown +++ b/website/source/docs/providers/aws/r/ami_from_instance.html.markdown @@ -28,7 +28,7 @@ to produce a fresh snapshot. 
## Example Usage -``` +```hcl resource "aws_ami_from_instance" "example" { name = "terraform-example" source_instance_id = "i-xxxxxxxx" diff --git a/website/source/docs/providers/aws/r/ami_launch_permission.html.markdown b/website/source/docs/providers/aws/r/ami_launch_permission.html.markdown index 2758af0fc..838d4fb17 100644 --- a/website/source/docs/providers/aws/r/ami_launch_permission.html.markdown +++ b/website/source/docs/providers/aws/r/ami_launch_permission.html.markdown @@ -12,7 +12,7 @@ Adds launch permission to Amazon Machine Image (AMI) from another AWS account. ## Example Usage -``` +```hcl resource "aws_ami_launch_permission" "example" { image_id = "ami-12345678" account_id = "123456789012" diff --git a/website/source/docs/providers/aws/r/api_gateway_account.html.markdown b/website/source/docs/providers/aws/r/api_gateway_account.html.markdown index 58951c5fd..3a6bf993b 100644 --- a/website/source/docs/providers/aws/r/api_gateway_account.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_account.html.markdown @@ -14,7 +14,7 @@ Provides a settings of an API Gateway Account. Settings is applied region-wide p ## Example Usage -``` +```hcl resource "aws_api_gateway_account" "demo" { cloudwatch_role_arn = "${aws_iam_role.cloudwatch.arn}" } diff --git a/website/source/docs/providers/aws/r/api_gateway_api_key.html.markdown b/website/source/docs/providers/aws/r/api_gateway_api_key.html.markdown index ad7e8413f..75ca5509f 100644 --- a/website/source/docs/providers/aws/r/api_gateway_api_key.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_api_key.html.markdown @@ -14,7 +14,7 @@ Provides an API Gateway API Key. ## Example Usage -``` +```hcl resource "aws_api_gateway_rest_api" "MyDemoAPI" { name = "MyDemoAPI" } diff --git a/website/source/docs/providers/aws/r/api_gateway_authorizer.html.markdown b/website/source/docs/providers/aws/r/api_gateway_authorizer.html.markdown index 39fb5a881..366bb6202 100644 --- a/website/source/docs/providers/aws/r/api_gateway_authorizer.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_authorizer.html.markdown @@ -12,7 +12,7 @@ Provides an API Gateway Authorizer. ## Example Usage -``` +```hcl resource "aws_api_gateway_authorizer" "demo" { name = "demo" rest_api_id = "${aws_api_gateway_rest_api.demo.id}" diff --git a/website/source/docs/providers/aws/r/api_gateway_base_path_mapping.html.markdown b/website/source/docs/providers/aws/r/api_gateway_base_path_mapping.html.markdown index 6b6dc93bf..ef9aca929 100644 --- a/website/source/docs/providers/aws/r/api_gateway_base_path_mapping.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_base_path_mapping.html.markdown @@ -14,7 +14,7 @@ custom domain name. ## Example Usage -``` +```hcl resource "aws_api_gateway_deployment" "example" { # See aws_api_gateway_rest_api_docs for how to create this rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}" diff --git a/website/source/docs/providers/aws/r/api_gateway_client_certificate.html.markdown b/website/source/docs/providers/aws/r/api_gateway_client_certificate.html.markdown index 2ca05a948..7e99ef63b 100644 --- a/website/source/docs/providers/aws/r/api_gateway_client_certificate.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_client_certificate.html.markdown @@ -12,7 +12,7 @@ Provides an API Gateway Client Certificate. 
## Example Usage -``` +```hcl resource "aws_api_gateway_client_certificate" "demo" { description = "My client certificate" } diff --git a/website/source/docs/providers/aws/r/api_gateway_deployment.html.markdown b/website/source/docs/providers/aws/r/api_gateway_deployment.html.markdown index 4ad0a5950..b3ce05046 100644 --- a/website/source/docs/providers/aws/r/api_gateway_deployment.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_deployment.html.markdown @@ -15,7 +15,7 @@ you might need to add an explicit `depends_on = ["aws_api_gateway_integration.na ## Example Usage -``` +```hcl resource "aws_api_gateway_rest_api" "MyDemoAPI" { name = "MyDemoAPI" description = "This is my API for demonstration purposes" diff --git a/website/source/docs/providers/aws/r/api_gateway_domain_name.html.markdown b/website/source/docs/providers/aws/r/api_gateway_domain_name.html.markdown index a83862b4c..10da7b09a 100644 --- a/website/source/docs/providers/aws/r/api_gateway_domain_name.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_domain_name.html.markdown @@ -27,7 +27,7 @@ traditional CNAME) to the Cloudfront domain name exported in the ## Example Usage -``` +```hcl resource "aws_api_gateway_domain_name" "example" { domain_name = "api.example.com" diff --git a/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown b/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown index cbb2102bb..836c55f46 100644 --- a/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown @@ -12,7 +12,7 @@ Provides an HTTP Method Integration for an API Gateway Integration. ## Example Usage -``` +```hcl resource "aws_api_gateway_rest_api" "MyDemoAPI" { name = "MyDemoAPI" description = "This is my API for demonstration purposes" @@ -54,7 +54,7 @@ EOF ## Lambda integration -``` +```hcl # Variables variable "myregion" {} variable "accountId" {} diff --git a/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown b/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown index 203bd99c0..c04bbc023 100644 --- a/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown @@ -15,7 +15,7 @@ you might need to add an explicit `depends_on` for clean runs. ## Example Usage -``` +```hcl resource "aws_api_gateway_rest_api" "MyDemoAPI" { name = "MyDemoAPI" description = "This is my API for demonstration purposes" diff --git a/website/source/docs/providers/aws/r/api_gateway_method.html.markdown b/website/source/docs/providers/aws/r/api_gateway_method.html.markdown index 3445f5ca0..fd87fb850 100644 --- a/website/source/docs/providers/aws/r/api_gateway_method.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_method.html.markdown @@ -12,7 +12,7 @@ Provides a HTTP Method for an API Gateway Resource. 
## Example Usage -``` +```hcl resource "aws_api_gateway_rest_api" "MyDemoAPI" { name = "MyDemoAPI" description = "This is my API for demonstration purposes" diff --git a/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown b/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown index 7b112615c..7e6041ab8 100644 --- a/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown @@ -12,7 +12,7 @@ Provides an HTTP Method Response for an API Gateway Resource. ## Example Usage -``` +```hcl resource "aws_api_gateway_rest_api" "MyDemoAPI" { name = "MyDemoAPI" description = "This is my API for demonstration purposes" diff --git a/website/source/docs/providers/aws/r/api_gateway_model.html.markdown b/website/source/docs/providers/aws/r/api_gateway_model.html.markdown index 6077fbb14..34e792290 100644 --- a/website/source/docs/providers/aws/r/api_gateway_model.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_model.html.markdown @@ -12,7 +12,7 @@ Provides a Model for a API Gateway. ## Example Usage -``` +```hcl resource "aws_api_gateway_rest_api" "MyDemoAPI" { name = "MyDemoAPI" description = "This is my API for demonstration purposes" diff --git a/website/source/docs/providers/aws/r/api_gateway_resource.html.markdown b/website/source/docs/providers/aws/r/api_gateway_resource.html.markdown index 8abcd4281..4542aca0a 100644 --- a/website/source/docs/providers/aws/r/api_gateway_resource.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_resource.html.markdown @@ -12,7 +12,7 @@ Provides an API Gateway Resource. ## Example Usage -``` +```hcl resource "aws_api_gateway_rest_api" "MyDemoAPI" { name = "MyDemoAPI" description = "This is my API for demonstration purposes" diff --git a/website/source/docs/providers/aws/r/api_gateway_rest_api.html.markdown b/website/source/docs/providers/aws/r/api_gateway_rest_api.html.markdown index 27ef1a2e0..a654804c1 100644 --- a/website/source/docs/providers/aws/r/api_gateway_rest_api.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_rest_api.html.markdown @@ -12,7 +12,7 @@ Provides an API Gateway REST API. ## Example Usage -``` +```hcl resource "aws_api_gateway_rest_api" "MyDemoAPI" { name = "MyDemoAPI" description = "This is my API for demonstration purposes" diff --git a/website/source/docs/providers/aws/r/api_gateway_usage_plan.html.markdown b/website/source/docs/providers/aws/r/api_gateway_usage_plan.html.markdown index 6a9a0800b..ee8b70c1f 100644 --- a/website/source/docs/providers/aws/r/api_gateway_usage_plan.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_usage_plan.html.markdown @@ -12,7 +12,7 @@ Provides an API Gateway Usage Plan. ## Example Usage -``` +```hcl resource "aws_api_gateway_rest_api" "myapi" { name = "MyDemoAPI" } diff --git a/website/source/docs/providers/aws/r/api_gateway_usage_plan_key.html.markdown b/website/source/docs/providers/aws/r/api_gateway_usage_plan_key.html.markdown index 6ec44fcf1..7b1fdf767 100644 --- a/website/source/docs/providers/aws/r/api_gateway_usage_plan_key.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_usage_plan_key.html.markdown @@ -12,7 +12,7 @@ Provides an API Gateway Usage Plan Key. 
## Example Usage -``` +```hcl resource "aws_api_gateway_rest_api" "test" { name = "MyDemoAPI" } diff --git a/website/source/docs/providers/aws/r/app_cookie_stickiness_policy.html.markdown b/website/source/docs/providers/aws/r/app_cookie_stickiness_policy.html.markdown index 3cf8ffdca..aff29db4c 100644 --- a/website/source/docs/providers/aws/r/app_cookie_stickiness_policy.html.markdown +++ b/website/source/docs/providers/aws/r/app_cookie_stickiness_policy.html.markdown @@ -12,7 +12,7 @@ Provides an application cookie stickiness policy, which allows an ELB to wed its ## Example Usage -``` +```hcl resource "aws_elb" "lb" { name = "test-lb" availability_zones = ["us-east-1a"] diff --git a/website/source/docs/providers/aws/r/appautoscaling_policy.html.markdown b/website/source/docs/providers/aws/r/appautoscaling_policy.html.markdown index ef4146af8..4d2f217fa 100644 --- a/website/source/docs/providers/aws/r/appautoscaling_policy.html.markdown +++ b/website/source/docs/providers/aws/r/appautoscaling_policy.html.markdown @@ -11,7 +11,8 @@ description: |- Provides an Application AutoScaling Policy resource. ## Example Usage -``` + +```hcl resource "aws_appautoscaling_target" "ecs_target" { max_capacity = 4 min_capacity = 1 @@ -53,7 +54,7 @@ The following arguments are supported: * `service_namespace` - (Required) The AWS service namespace of the scalable target. Valid values are `ecs` for Amazon ECS services and `ec2` Amazon EC2 Spot fleet requests. * `step_adjustment` - (Optional) A set of adjustments that manage scaling. These have the following structure: - ``` + ```hcl step_adjustment { metric_interval_lower_bound = 1.0 metric_interval_upper_bound = 2.0 diff --git a/website/source/docs/providers/aws/r/appautoscaling_target.html.markdown b/website/source/docs/providers/aws/r/appautoscaling_target.html.markdown index dd1ed34cf..c52834977 100644 --- a/website/source/docs/providers/aws/r/appautoscaling_target.html.markdown +++ b/website/source/docs/providers/aws/r/appautoscaling_target.html.markdown @@ -11,7 +11,8 @@ description: |- Provides an Application AutoScaling ScalableTarget resource. ## Example Usage -``` + +```hcl resource "aws_appautoscaling_target" "ecs_target" { max_capacity = 4 min_capacity = 1 diff --git a/website/source/docs/providers/aws/r/autoscaling_attachment.html.markdown b/website/source/docs/providers/aws/r/autoscaling_attachment.html.markdown index d12560da2..faf877815 100644 --- a/website/source/docs/providers/aws/r/autoscaling_attachment.html.markdown +++ b/website/source/docs/providers/aws/r/autoscaling_attachment.html.markdown @@ -19,7 +19,7 @@ conflict and will overwrite attachments. ## Example Usage -``` +```hcl # Create a new load balancer attachment resource "aws_autoscaling_attachment" "asg_attachment_bar" { autoscaling_group_name = "${aws_autoscaling_group.asg.id}" @@ -27,7 +27,7 @@ resource "aws_autoscaling_attachment" "asg_attachment_bar" { } ``` -``` +```hcl # Create a new ALB Target Group attachment resource "aws_autoscaling_attachment" "asg_attachment_bar" { autoscaling_group_name = "${aws_autoscaling_group.asg.id}" diff --git a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown index 07e21766d..86b92a669 100644 --- a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown +++ b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown @@ -12,7 +12,7 @@ Provides an AutoScaling Group resource. 
## Example Usage -``` +```hcl resource "aws_placement_group" "test" { name = "test" strategy = "cluster" diff --git a/website/source/docs/providers/aws/r/autoscaling_lifecycle_hooks.html.markdown b/website/source/docs/providers/aws/r/autoscaling_lifecycle_hooks.html.markdown index 63a17ced7..6a1d9468f 100644 --- a/website/source/docs/providers/aws/r/autoscaling_lifecycle_hooks.html.markdown +++ b/website/source/docs/providers/aws/r/autoscaling_lifecycle_hooks.html.markdown @@ -24,7 +24,7 @@ but take care to not duplicate those hooks with this resource. ## Example Usage -``` +```hcl resource "aws_autoscaling_group" "foobar" { availability_zones = ["us-west-2a"] name = "terraform-test-foobar5" diff --git a/website/source/docs/providers/aws/r/autoscaling_notification.html.markdown b/website/source/docs/providers/aws/r/autoscaling_notification.html.markdown index 5229e66db..fb281f147 100644 --- a/website/source/docs/providers/aws/r/autoscaling_notification.html.markdown +++ b/website/source/docs/providers/aws/r/autoscaling_notification.html.markdown @@ -16,7 +16,7 @@ Services, and are applied to each AutoScaling Group you supply. Basic usage: -``` +```hcl resource "aws_autoscaling_notification" "example_notifications" { group_names = [ "${aws_autoscaling_group.bar.name}", diff --git a/website/source/docs/providers/aws/r/autoscaling_policy.html.markdown b/website/source/docs/providers/aws/r/autoscaling_policy.html.markdown index b57cad5c7..5d37945a8 100644 --- a/website/source/docs/providers/aws/r/autoscaling_policy.html.markdown +++ b/website/source/docs/providers/aws/r/autoscaling_policy.html.markdown @@ -17,7 +17,8 @@ or [dynamic](https://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-sc (policy-based) scaling. ## Example Usage -``` + +```hcl resource "aws_autoscaling_policy" "bat" { name = "foobar3-terraform-test" scaling_adjustment = 4 @@ -59,7 +60,7 @@ The following arguments are only available to "StepScaling" type policies: * `step_adjustments` - (Optional) A set of adjustments that manage group scaling. These have the following structure: -``` +```hcl step_adjustment { scaling_adjustment = -1 metric_interval_lower_bound = 1.0 diff --git a/website/source/docs/providers/aws/r/autoscaling_schedule.html.markdown b/website/source/docs/providers/aws/r/autoscaling_schedule.html.markdown index ba172609e..1392361ac 100644 --- a/website/source/docs/providers/aws/r/autoscaling_schedule.html.markdown +++ b/website/source/docs/providers/aws/r/autoscaling_schedule.html.markdown @@ -11,7 +11,8 @@ description: |- Provides an AutoScaling Schedule resource. ## Example Usage -``` + +```hcl resource "aws_autoscaling_group" "foobar" { availability_zones = ["us-west-2a"] name = "terraform-test-foobar5" diff --git a/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown b/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown index 9ab1a8a19..6948fa97a 100644 --- a/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown +++ b/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown @@ -12,7 +12,7 @@ Provides a CloudFormation Stack resource. 
## Example Usage -``` +```hcl resource "aws_cloudformation_stack" "network" { name = "networking-stack" diff --git a/website/source/docs/providers/aws/r/cloudfront_distribution.html.markdown b/website/source/docs/providers/aws/r/cloudfront_distribution.html.markdown index 59cda86b7..cfb25c0d0 100644 --- a/website/source/docs/providers/aws/r/cloudfront_distribution.html.markdown +++ b/website/source/docs/providers/aws/r/cloudfront_distribution.html.markdown @@ -24,7 +24,7 @@ want to wait, you need to use the `retain_on_delete` flag. The following example below creates a CloudFront distribution with an S3 origin. -``` +```hcl resource "aws_s3_bucket" "b" { bucket = "mybucket" acl = "private" diff --git a/website/source/docs/providers/aws/r/cloudfront_origin_access_identity.html.markdown b/website/source/docs/providers/aws/r/cloudfront_origin_access_identity.html.markdown index 0b384fd13..b35ebc661 100644 --- a/website/source/docs/providers/aws/r/cloudfront_origin_access_identity.html.markdown +++ b/website/source/docs/providers/aws/r/cloudfront_origin_access_identity.html.markdown @@ -19,7 +19,7 @@ origin access identities, see The following example below creates a CloudFront origin access identity. -``` +```hcl resource "aws_cloudfront_origin_access_identity" "origin_access_identity" { comment = "Some comment" } @@ -55,7 +55,7 @@ The `cloudfront_access_identity_path` allows this to be circumvented. The below snippet demonstrates use with the `s3_origin_config` structure for the [`aws_cloudfront_web_distribution`][3] resource: -``` +```hcl s3_origin_config { origin_access_identity = "${aws_cloudfront_origin_access_identity.origin_access_identity.cloudfront_access_identity_path}" } @@ -68,7 +68,7 @@ principal into an `AWS` IAM ARN principal when supplied in an [`aws_s3_bucket`][4] bucket policy, causing spurious diffs in Terraform. If you see this behaviour, use the `iam_arn` instead: -``` +```hcl data "aws_iam_policy_document" "s3_policy" { statement { actions = ["s3:GetObject"] diff --git a/website/source/docs/providers/aws/r/cloudtrail.html.markdown b/website/source/docs/providers/aws/r/cloudtrail.html.markdown index bebfe49e7..f03cb5b38 100644 --- a/website/source/docs/providers/aws/r/cloudtrail.html.markdown +++ b/website/source/docs/providers/aws/r/cloudtrail.html.markdown @@ -11,7 +11,8 @@ description: |- Provides a CloudTrail resource. ## Example Usage -``` + +```hcl resource "aws_cloudtrail" "foobar" { name = "tf-trail-foobar" s3_bucket_name = "${aws_s3_bucket.foo.id}" diff --git a/website/source/docs/providers/aws/r/cloudwatch_event_rule.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_event_rule.html.markdown index ece65d272..fb07969b2 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_event_rule.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_event_rule.html.markdown @@ -12,7 +12,7 @@ Provides a CloudWatch Event Rule resource. ## Example Usage -``` +```hcl resource "aws_cloudwatch_event_rule" "console" { name = "capture-aws-sign-in" description = "Capture each AWS Console Sign In" diff --git a/website/source/docs/providers/aws/r/cloudwatch_event_target.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_event_target.html.markdown index ff60607a9..161097863 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_event_target.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_event_target.html.markdown @@ -12,7 +12,7 @@ Provides a CloudWatch Event Target resource. 
## Example Usage -``` +```hcl resource "aws_cloudwatch_event_target" "yada" { target_id = "Yada" rule = "${aws_cloudwatch_event_rule.console.name}" diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_destination.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_destination.html.markdown index d1b1e9238..b00ac1dbf 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_log_destination.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_log_destination.html.markdown @@ -12,7 +12,7 @@ Provides a CloudWatch Logs destination resource. ## Example Usage -``` +```hcl resource "aws_cloudwatch_log_destination" "test_destination" { name = "test_destination" role_arn = "${aws_iam_role.iam_for_cloudwatch.arn}" diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_destination_policy.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_destination_policy.html.markdown index 452e254dd..46172d613 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_log_destination_policy.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_log_destination_policy.html.markdown @@ -12,7 +12,7 @@ Provides a CloudWatch Logs destination policy resource. ## Example Usage -``` +```hcl resource "aws_cloudwatch_log_destination" "test_destination" { name = "test_destination" role_arn = "${aws_iam_role.iam_for_cloudwatch.arn}" diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown index bfa9fc4ad..88e7e0c62 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown @@ -12,7 +12,7 @@ Provides a CloudWatch Log Group resource. ## Example Usage -``` +```hcl resource "aws_cloudwatch_log_group" "yada" { name = "Yada" diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_metric_filter.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_metric_filter.html.markdown index 24a8dcaf6..492b111c0 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_log_metric_filter.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_log_metric_filter.html.markdown @@ -12,7 +12,7 @@ Provides a CloudWatch Log Metric Filter resource. ## Example Usage -``` +```hcl resource "aws_cloudwatch_log_metric_filter" "yada" { name = "MyAppAccessCount" pattern = "" diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_stream.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_stream.html.markdown index b01d3bb9b..0591814a4 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_log_stream.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_log_stream.html.markdown @@ -12,7 +12,7 @@ Provides a CloudWatch Log Stream resource. ## Example Usage -``` +```hcl resource "aws_cloudwatch_log_group" "yada" { name = "Yada" } diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_subscription_filter.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_subscription_filter.html.markdown index 882654e07..1a41b40f1 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_log_subscription_filter.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_log_subscription_filter.html.markdown @@ -12,7 +12,7 @@ Provides a CloudWatch Logs subscription filter resource. 
## Example Usage -``` +```hcl resource "aws_cloudwatch_log_subscription_filter" "test_lambdafunction_logfilter" { name = "test_lambdafunction_logfilter" role_arn = "${aws_iam_role.iam_for_lambda.arn}" diff --git a/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown index 80ed352c8..eaa837bab 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown @@ -11,7 +11,8 @@ description: |- Provides a CloudWatch Metric Alarm resource. ## Example Usage -``` + +```hcl resource "aws_cloudwatch_metric_alarm" "foobar" { alarm_name = "terraform-test-foobar5" comparison_operator = "GreaterThanOrEqualToThreshold" @@ -27,7 +28,8 @@ resource "aws_cloudwatch_metric_alarm" "foobar" { ``` ## Example in Conjunction with Scaling Policies -``` + +```hcl resource "aws_autoscaling_policy" "bat" { name = "foobar3-terraform-test" scaling_adjustment = 4 @@ -98,7 +100,6 @@ The following attributes are exported: * `id` - The ID of the metric alarm - ## Import CloudWatch Metric Alarms can be imported using the `alarm_name`, e.g. diff --git a/website/source/docs/providers/aws/r/code_commit_repository.html.markdown b/website/source/docs/providers/aws/r/code_commit_repository.html.markdown index 8fdf43576..b6e88968e 100644 --- a/website/source/docs/providers/aws/r/code_commit_repository.html.markdown +++ b/website/source/docs/providers/aws/r/code_commit_repository.html.markdown @@ -16,7 +16,7 @@ in all regions - available regions are listed ## Example Usage -``` +```hcl resource "aws_codecommit_repository" "test" { repository_name = "MyTestRepository" description = "This is the Sample App Repository" diff --git a/website/source/docs/providers/aws/r/code_commit_trigger.html.markdown b/website/source/docs/providers/aws/r/code_commit_trigger.html.markdown index 70e2e2347..82dd8dc03 100644 --- a/website/source/docs/providers/aws/r/code_commit_trigger.html.markdown +++ b/website/source/docs/providers/aws/r/code_commit_trigger.html.markdown @@ -16,7 +16,7 @@ in all regions - available regions are listed ## Example Usage -``` +```hcl resource "aws_codecommit_trigger" "test" { depends_on = ["aws_codecommit_repository.test"] repository_name = "my_test_repository" diff --git a/website/source/docs/providers/aws/r/codebuild_project.html.markdown b/website/source/docs/providers/aws/r/codebuild_project.html.markdown index b469815b6..b5e18341f 100644 --- a/website/source/docs/providers/aws/r/codebuild_project.html.markdown +++ b/website/source/docs/providers/aws/r/codebuild_project.html.markdown @@ -12,7 +12,7 @@ Provides a CodeBuild Project resource.
## Example Usage -``` +```hcl resource "aws_iam_role" "codebuild_role" { name = "codebuild-role-" diff --git a/website/source/docs/providers/aws/r/codedeploy_app.html.markdown b/website/source/docs/providers/aws/r/codedeploy_app.html.markdown index 146709e61..f22306c93 100644 --- a/website/source/docs/providers/aws/r/codedeploy_app.html.markdown +++ b/website/source/docs/providers/aws/r/codedeploy_app.html.markdown @@ -12,7 +12,7 @@ Provides a CodeDeploy application to be used as a basis for deployments ## Example Usage -``` +```hcl resource "aws_codedeploy_app" "foo" { name = "foo" } diff --git a/website/source/docs/providers/aws/r/codedeploy_deployment_config.html.markdown b/website/source/docs/providers/aws/r/codedeploy_deployment_config.html.markdown index 3b35f4519..9d7cbdd8d 100644 --- a/website/source/docs/providers/aws/r/codedeploy_deployment_config.html.markdown +++ b/website/source/docs/providers/aws/r/codedeploy_deployment_config.html.markdown @@ -12,7 +12,7 @@ Provides a CodeDeploy deployment config for an application ## Example Usage -``` +```hcl resource "aws_codedeploy_deployment_config" "foo" { deployment_config_name = "test-deployment-config" diff --git a/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown b/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown index 35aeda2dc..c7e744049 100644 --- a/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown +++ b/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown @@ -12,7 +12,7 @@ Provides a CodeDeploy deployment group for an application ## Example Usage -``` +```hcl resource "aws_codedeploy_app" "foo_app" { name = "foo_app" } diff --git a/website/source/docs/providers/aws/r/codepipeline.markdown b/website/source/docs/providers/aws/r/codepipeline.markdown index d4dd59aaa..500b21c6d 100644 --- a/website/source/docs/providers/aws/r/codepipeline.markdown +++ b/website/source/docs/providers/aws/r/codepipeline.markdown @@ -14,7 +14,7 @@ Provides a CodePipeline. ## Example Usage -``` +```hcl resource "aws_s3_bucket" "foo" { bucket = "test-bucket" acl = "private" diff --git a/website/source/docs/providers/aws/r/config_config_rule.html.markdown b/website/source/docs/providers/aws/r/config_config_rule.html.markdown index 9261c9fee..552154744 100644 --- a/website/source/docs/providers/aws/r/config_config_rule.html.markdown +++ b/website/source/docs/providers/aws/r/config_config_rule.html.markdown @@ -14,7 +14,7 @@ Provides an AWS Config Rule. ## Example Usage -``` +```hcl resource "aws_config_config_rule" "r" { name = "example" diff --git a/website/source/docs/providers/aws/r/config_configuration_recorder.html.markdown b/website/source/docs/providers/aws/r/config_configuration_recorder.html.markdown index 7b786bec8..da6f1a2c7 100644 --- a/website/source/docs/providers/aws/r/config_configuration_recorder.html.markdown +++ b/website/source/docs/providers/aws/r/config_configuration_recorder.html.markdown @@ -14,7 +14,7 @@ Provides an AWS Config Configuration Recorder. 
Please note that this resource ** ## Example Usage -``` +```hcl resource "aws_config_configuration_recorder" "foo" { name = "example" role_arn = "${aws_iam_role.r.arn}" diff --git a/website/source/docs/providers/aws/r/config_configuration_recorder_status.html.markdown b/website/source/docs/providers/aws/r/config_configuration_recorder_status.html.markdown index cc682b8b5..8f60cc6bf 100644 --- a/website/source/docs/providers/aws/r/config_configuration_recorder_status.html.markdown +++ b/website/source/docs/providers/aws/r/config_configuration_recorder_status.html.markdown @@ -14,7 +14,7 @@ Manages status (recording / stopped) of an AWS Config Configuration Recorder. ## Example Usage -``` +```hcl resource "aws_config_configuration_recorder_status" "foo" { name = "${aws_config_configuration_recorder.foo.name}" is_enabled = true diff --git a/website/source/docs/providers/aws/r/config_delivery_channel.html.markdown b/website/source/docs/providers/aws/r/config_delivery_channel.html.markdown index 667e66ba7..9f7fcf46f 100644 --- a/website/source/docs/providers/aws/r/config_delivery_channel.html.markdown +++ b/website/source/docs/providers/aws/r/config_delivery_channel.html.markdown @@ -14,7 +14,7 @@ Provides an AWS Config Delivery Channel. ## Example Usage -``` +```hcl resource "aws_config_delivery_channel" "foo" { name = "example" s3_bucket_name = "${aws_s3_bucket.b.bucket}" diff --git a/website/source/docs/providers/aws/r/customer_gateway.html.markdown b/website/source/docs/providers/aws/r/customer_gateway.html.markdown index 2bbc01e76..6eb2d83da 100644 --- a/website/source/docs/providers/aws/r/customer_gateway.html.markdown +++ b/website/source/docs/providers/aws/r/customer_gateway.html.markdown @@ -14,7 +14,7 @@ Provides a customer gateway inside a VPC. These objects can be connected to VPN ## Example Usage -``` +```hcl resource "aws_customer_gateway" "main" { bgp_asn = 65000 ip_address = "172.83.124.10" diff --git a/website/source/docs/providers/aws/r/db_event_subscription.html.markdown b/website/source/docs/providers/aws/r/db_event_subscription.html.markdown index b7146f262..5103121dc 100644 --- a/website/source/docs/providers/aws/r/db_event_subscription.html.markdown +++ b/website/source/docs/providers/aws/r/db_event_subscription.html.markdown @@ -10,7 +10,7 @@ Provides a DB event subscription resource. ## Example Usage -``` +```hcl resource "aws_db_instance" "default" { allocated_storage = 10 engine = "mysql" diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown index 7e6044425..72b2ab1ef 100644 --- a/website/source/docs/providers/aws/r/db_instance.html.markdown +++ b/website/source/docs/providers/aws/r/db_instance.html.markdown @@ -30,7 +30,7 @@ for more information. ## Example Usage -``` +```hcl resource "aws_db_instance" "default" { allocated_storage = 10 storage_type = "gp2" diff --git a/website/source/docs/providers/aws/r/db_option_group.html.markdown b/website/source/docs/providers/aws/r/db_option_group.html.markdown index ad4c4d5d4..ee1f8804c 100644 --- a/website/source/docs/providers/aws/r/db_option_group.html.markdown +++ b/website/source/docs/providers/aws/r/db_option_group.html.markdown @@ -10,7 +10,7 @@ Provides an RDS DB option group resource. 
## Example Usage -``` +```hcl resource "aws_db_option_group" "bar" { name = "option-group-test-terraform" option_group_description = "Terraform Option Group" diff --git a/website/source/docs/providers/aws/r/db_parameter_group.html.markdown b/website/source/docs/providers/aws/r/db_parameter_group.html.markdown index 2e4d362d2..25ea6b539 100644 --- a/website/source/docs/providers/aws/r/db_parameter_group.html.markdown +++ b/website/source/docs/providers/aws/r/db_parameter_group.html.markdown @@ -10,7 +10,7 @@ Provides an RDS DB parameter group resource. ## Example Usage -``` +```hcl resource "aws_db_parameter_group" "default" { name = "rds-pg" family = "mysql5.6" diff --git a/website/source/docs/providers/aws/r/db_security_group.html.markdown b/website/source/docs/providers/aws/r/db_security_group.html.markdown index 788934edd..275c94690 100644 --- a/website/source/docs/providers/aws/r/db_security_group.html.markdown +++ b/website/source/docs/providers/aws/r/db_security_group.html.markdown @@ -15,7 +15,7 @@ attribute instead. ## Example Usage -``` +```hcl resource "aws_db_security_group" "default" { name = "rds_sg" diff --git a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown b/website/source/docs/providers/aws/r/db_subnet_group.html.markdown index bee5eee52..eaf27e030 100644 --- a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown +++ b/website/source/docs/providers/aws/r/db_subnet_group.html.markdown @@ -12,7 +12,7 @@ Provides an RDS DB subnet group resource. ## Example Usage -``` +```hcl resource "aws_db_subnet_group" "default" { name = "main" subnet_ids = ["${aws_subnet.frontend.id}", "${aws_subnet.backend.id}"] diff --git a/website/source/docs/providers/aws/r/default_network_acl.html.markdown b/website/source/docs/providers/aws/r/default_network_acl.html.markdown index c61d4d24a..cbabbd22f 100644 --- a/website/source/docs/providers/aws/r/default_network_acl.html.markdown +++ b/website/source/docs/providers/aws/r/default_network_acl.html.markdown @@ -39,7 +39,7 @@ The following config gives the Default Network ACL the same rules that AWS includes, but pulls the resource under management by Terraform. This means that any ACL rules added or changed will be detected as drift. -``` +```hcl resource "aws_vpc" "mainvpc" { cidr_block = "10.1.0.0/16" } @@ -72,7 +72,7 @@ resource "aws_default_network_acl" "default" { The following denies all Egress traffic by omitting any `egress` rules, while including the default `ingress` rule to allow all traffic. -``` +```hcl resource "aws_vpc" "mainvpc" { cidr_block = "10.1.0.0/16" } @@ -97,7 +97,7 @@ This config denies all traffic in the Default ACL. This can be useful if you want a locked down default to force all resources in the VPC to assign a non-default ACL. -``` +```hcl resource "aws_vpc" "mainvpc" { cidr_block = "10.1.0.0/16" } diff --git a/website/source/docs/providers/aws/r/default_route_table.html.markdown b/website/source/docs/providers/aws/r/default_route_table.html.markdown index 4ef364ea2..30a3bc008 100644 --- a/website/source/docs/providers/aws/r/default_route_table.html.markdown +++ b/website/source/docs/providers/aws/r/default_route_table.html.markdown @@ -42,7 +42,7 @@ a conflict of rule settings and will overwrite routes. 
## Example usage with tags: -``` +```hcl resource "aws_default_route_table" "r" { default_route_table_id = "${aws_vpc.foo.default_route_table_id}" diff --git a/website/source/docs/providers/aws/r/default_security_group.html.markdown b/website/source/docs/providers/aws/r/default_security_group.html.markdown index ce79bcd7f..d5300f92b 100644 --- a/website/source/docs/providers/aws/r/default_security_group.html.markdown +++ b/website/source/docs/providers/aws/r/default_security_group.html.markdown @@ -40,7 +40,7 @@ The following config gives the Default Security Group the same rules that AWS provides by default, but pulls the resource under management by Terraform. This means that any ingress or egress rules added or changed will be detected as drift. -``` +```hcl resource "aws_vpc" "mainvpc" { cidr_block = "10.1.0.0/16" } @@ -69,7 +69,7 @@ resource "aws_default_security_group" "default" { The following denies all Egress traffic by omitting any `egress` rules, while including the default `ingress` rule to allow all traffic. -``` +```hcl resource "aws_vpc" "mainvpc" { cidr_block = "10.1.0.0/16" } diff --git a/website/source/docs/providers/aws/r/directory_service_directory.html.markdown b/website/source/docs/providers/aws/r/directory_service_directory.html.markdown index 23b1767bb..16bc9145c 100644 --- a/website/source/docs/providers/aws/r/directory_service_directory.html.markdown +++ b/website/source/docs/providers/aws/r/directory_service_directory.html.markdown @@ -15,7 +15,7 @@ Provides a Simple or Managed Microsoft directory in AWS Directory Service. ## Example Usage -``` +```hcl resource "aws_directory_service_directory" "bar" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" diff --git a/website/source/docs/providers/aws/r/dms_certificate.html.markdown b/website/source/docs/providers/aws/r/dms_certificate.html.markdown index 0e2e50eb6..6e7a06d8f 100644 --- a/website/source/docs/providers/aws/r/dms_certificate.html.markdown +++ b/website/source/docs/providers/aws/r/dms_certificate.html.markdown @@ -15,7 +15,7 @@ Provides a DMS (Data Migration Service) certificate resource. DMS certificates c ## Example Usage -``` +```hcl # Create a new certificate resource "aws_dms_certificate" "test" { certificate_id = "test-dms-certificate-tf" diff --git a/website/source/docs/providers/aws/r/dms_endpoint.html.markdown b/website/source/docs/providers/aws/r/dms_endpoint.html.markdown index 2ef626fad..e88b55a29 100644 --- a/website/source/docs/providers/aws/r/dms_endpoint.html.markdown +++ b/website/source/docs/providers/aws/r/dms_endpoint.html.markdown @@ -15,7 +15,7 @@ Provides a DMS (Data Migration Service) endpoint resource. DMS endpoints can be ## Example Usage -``` +```hcl # Create a new endpoint resource "aws_dms_endpoint" "test" { certificate_arn = "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012" diff --git a/website/source/docs/providers/aws/r/dms_replication_instance.html.markdown b/website/source/docs/providers/aws/r/dms_replication_instance.html.markdown index 0c85ad7d3..c51665817 100644 --- a/website/source/docs/providers/aws/r/dms_replication_instance.html.markdown +++ b/website/source/docs/providers/aws/r/dms_replication_instance.html.markdown @@ -12,7 +12,7 @@ Provides a DMS (Data Migration Service) replication instance resource. 
DMS repli ## Example Usage -``` +```hcl # Create a new replication instance resource "aws_dms_replication_instance" "test" { allocated_storage = 20 diff --git a/website/source/docs/providers/aws/r/dms_replication_subnet_group.html.markdown b/website/source/docs/providers/aws/r/dms_replication_subnet_group.html.markdown index 84ac1d103..d3aa97fde 100644 --- a/website/source/docs/providers/aws/r/dms_replication_subnet_group.html.markdown +++ b/website/source/docs/providers/aws/r/dms_replication_subnet_group.html.markdown @@ -12,7 +12,7 @@ Provides a DMS (Data Migration Service) replication subnet group resource. DMS r ## Example Usage -``` +```hcl # Create a new replication subnet group resource "aws_dms_replication_subnet_group" "test" { replication_subnet_group_description = "Test replication subnet group" diff --git a/website/source/docs/providers/aws/r/dms_replication_task.html.markdown b/website/source/docs/providers/aws/r/dms_replication_task.html.markdown index 60ef2e99d..eb24c0430 100644 --- a/website/source/docs/providers/aws/r/dms_replication_task.html.markdown +++ b/website/source/docs/providers/aws/r/dms_replication_task.html.markdown @@ -12,7 +12,7 @@ Provides a DMS (Data Migration Service) replication task resource. DMS replicati ## Example Usage -``` +```hcl # Create a new replication task resource "aws_dms_replication_task" "test" { cdc_start_time = 1484346880 diff --git a/website/source/docs/providers/aws/r/dynamodb_table.html.markdown b/website/source/docs/providers/aws/r/dynamodb_table.html.markdown index fe532610a..b3cd64cf4 100644 --- a/website/source/docs/providers/aws/r/dynamodb_table.html.markdown +++ b/website/source/docs/providers/aws/r/dynamodb_table.html.markdown @@ -15,7 +15,7 @@ Provides a DynamoDB table resource The following DynamoDB table description models the table and GSI shown in the [AWS SDK example documentation](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.html) -``` +```hcl resource "aws_dynamodb_table" "basic-dynamodb-table" { name = "GameScores" read_capacity = 20 diff --git a/website/source/docs/providers/aws/r/ebs_snapshot.html.md b/website/source/docs/providers/aws/r/ebs_snapshot.html.md index 5c6eebe1e..fffa6c978 100644 --- a/website/source/docs/providers/aws/r/ebs_snapshot.html.md +++ b/website/source/docs/providers/aws/r/ebs_snapshot.html.md @@ -12,7 +12,7 @@ Creates a Snapshot of an EBS Volume. ## Example Usage -``` +```hcl resource "aws_ebs_volume" "example" { availability_zone = "us-west-2a" size = 40 diff --git a/website/source/docs/providers/aws/r/ebs_volume.html.md b/website/source/docs/providers/aws/r/ebs_volume.html.md index a5e4cade9..205473f98 100644 --- a/website/source/docs/providers/aws/r/ebs_volume.html.md +++ b/website/source/docs/providers/aws/r/ebs_volume.html.md @@ -12,7 +12,7 @@ Manages a single EBS volume.
## Example Usage -``` +```hcl resource "aws_ebs_volume" "example" { availability_zone = "us-west-2a" size = 40 diff --git a/website/source/docs/providers/aws/r/ecr_repository.html.markdown b/website/source/docs/providers/aws/r/ecr_repository.html.markdown index e591b8509..d9783e0a2 100644 --- a/website/source/docs/providers/aws/r/ecr_repository.html.markdown +++ b/website/source/docs/providers/aws/r/ecr_repository.html.markdown @@ -16,7 +16,7 @@ in all regions - available regions are listed ## Example Usage -``` +```hcl resource "aws_ecr_repository" "foo" { name = "bar" } diff --git a/website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown b/website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown index 70bb3c0db..a6437cf6c 100644 --- a/website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown +++ b/website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown @@ -18,7 +18,7 @@ in all regions - available regions are listed ## Example Usage -``` +```hcl resource "aws_ecr_repository" "foo" { name = "bar" } diff --git a/website/source/docs/providers/aws/r/ecs_cluster.html.markdown b/website/source/docs/providers/aws/r/ecs_cluster.html.markdown index d00e05fab..569bcc9b5 100644 --- a/website/source/docs/providers/aws/r/ecs_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/ecs_cluster.html.markdown @@ -12,7 +12,7 @@ Provides an ECS cluster. ## Example Usage -``` +```hcl resource "aws_ecs_cluster" "foo" { name = "white-hart" } diff --git a/website/source/docs/providers/aws/r/ecs_service.html.markdown b/website/source/docs/providers/aws/r/ecs_service.html.markdown index 8e8c9716f..54558a70a 100644 --- a/website/source/docs/providers/aws/r/ecs_service.html.markdown +++ b/website/source/docs/providers/aws/r/ecs_service.html.markdown @@ -16,7 +16,7 @@ See [ECS Services section in AWS developer guide](https://docs.aws.amazon.com/Am ## Example Usage -``` +```hcl resource "aws_ecs_service" "mongo" { name = "mongodb" cluster = "${aws_ecs_cluster.foo.id}" diff --git a/website/source/docs/providers/aws/r/ecs_task_definition.html.markdown b/website/source/docs/providers/aws/r/ecs_task_definition.html.markdown index 0db84067a..fd40c9ef0 100644 --- a/website/source/docs/providers/aws/r/ecs_task_definition.html.markdown +++ b/website/source/docs/providers/aws/r/ecs_task_definition.html.markdown @@ -12,7 +12,7 @@ Provides an ECS task definition to be used in `aws_ecs_service`. ## Example Usage -``` +```hcl resource "aws_ecs_task_definition" "service" { family = "service" container_definitions = "${file("task-definitions/service.json")}" diff --git a/website/source/docs/providers/aws/r/efs_file_system.html.markdown b/website/source/docs/providers/aws/r/efs_file_system.html.markdown index 834b2c944..c65d1322d 100644 --- a/website/source/docs/providers/aws/r/efs_file_system.html.markdown +++ b/website/source/docs/providers/aws/r/efs_file_system.html.markdown @@ -12,7 +12,7 @@ Provides an Elastic File System (EFS) resource. ## Example Usage -``` +```hcl resource "aws_efs_file_system" "foo" { creation_token = "my-product" diff --git a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown b/website/source/docs/providers/aws/r/efs_mount_target.html.markdown index d91ee0934..eb3f3562e 100644 --- a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown +++ b/website/source/docs/providers/aws/r/efs_mount_target.html.markdown @@ -12,7 +12,7 @@ Provides an Elastic File System (EFS) mount target. 
## Example Usage -``` +```hcl resource "aws_efs_mount_target" "alpha" { file_system_id = "${aws_efs_file_system.foo.id}" subnet_id = "${aws_subnet.alpha.id}" diff --git a/website/source/docs/providers/aws/r/egress_only_internet_gateway.html.markdown b/website/source/docs/providers/aws/r/egress_only_internet_gateway.html.markdown index 613a40315..e4238a25a 100644 --- a/website/source/docs/providers/aws/r/egress_only_internet_gateway.html.markdown +++ b/website/source/docs/providers/aws/r/egress_only_internet_gateway.html.markdown @@ -15,7 +15,7 @@ outside of your VPC from initiating an IPv6 connection with your instance. ## Example Usage -``` +```hcl resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" assign_amazon_ipv6_cidr_block = true diff --git a/website/source/docs/providers/aws/r/eip.html.markdown b/website/source/docs/providers/aws/r/eip.html.markdown index 615dc0fef..1eb96a092 100644 --- a/website/source/docs/providers/aws/r/eip.html.markdown +++ b/website/source/docs/providers/aws/r/eip.html.markdown @@ -14,7 +14,7 @@ Provides an Elastic IP resource. Single EIP associated with an instance: -``` +```hcl resource "aws_eip" "lb" { instance = "${aws_instance.web.id}" vpc = true @@ -23,7 +23,7 @@ resource "aws_eip" "lb" { Multiple EIPs associated with a single network interface: -``` +```hcl resource "aws_network_interface" "multi-ip" { subnet_id = "${aws_subnet.main.id}" private_ips = ["10.0.0.10", "10.0.0.11"] @@ -44,7 +44,7 @@ resource "aws_eip" "two" { Attaching an EIP to an Instance with a pre-assigned private IP (VPC Only): -``` +```hcl resource "aws_vpc" "default" { cidr_block = "10.0.0.0/16" enable_dns_hostnames = true diff --git a/website/source/docs/providers/aws/r/eip_association.html.markdown b/website/source/docs/providers/aws/r/eip_association.html.markdown index a2f3b8b98..fe473b51f 100644 --- a/website/source/docs/providers/aws/r/eip_association.html.markdown +++ b/website/source/docs/providers/aws/r/eip_association.html.markdown @@ -16,7 +16,7 @@ pre-existing or distributed to customers or users and therefore cannot be change ## Example Usage -``` +```hcl resource "aws_eip_association" "eip_assoc" { instance_id = "${aws_instance.web.id}" allocation_id = "${aws_eip.example.id}" diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_application.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_application.html.markdown index 2298070cd..d8437fff3 100644 --- a/website/source/docs/providers/aws/r/elastic_beanstalk_application.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_beanstalk_application.html.markdown @@ -17,7 +17,7 @@ This resource creates an application that has one configuration template named ## Example Usage -``` +```hcl resource "aws_elastic_beanstalk_application" "tftest" { name = "tf-test-name" description = "tf-test-desc" diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_application_version.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_application_version.html.markdown index be8f39000..5cf76b849 100644 --- a/website/source/docs/providers/aws/r/elastic_beanstalk_application_version.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_beanstalk_application_version.html.markdown @@ -27,7 +27,7 @@ Elastic Beanstalk Application. For example <revision>-<environment>.
## Example Usage -``` +```hcl resource "aws_s3_bucket" "default" { bucket = "tftest.applicationversion.bucket" } diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_configuration_template.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_configuration_template.html.markdown index 56e38ac6c..37b0bc775 100644 --- a/website/source/docs/providers/aws/r/elastic_beanstalk_configuration_template.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_beanstalk_configuration_template.html.markdown @@ -14,8 +14,7 @@ application with the same configuration settings. ## Example Usage - -``` +```hcl resource "aws_elastic_beanstalk_application" "tftest" { name = "tf-test-name" description = "tf-test-desc" diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown index f6d3d5c0b..ef27d6d84 100644 --- a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown @@ -17,8 +17,7 @@ Environments are often things such as `development`, `integration`, or ## Example Usage - -``` +```hcl resource "aws_elastic_beanstalk_application" "tftest" { name = "tf-test-name" description = "tf-test-desc" @@ -80,7 +79,7 @@ The `setting` and `all_settings` mappings support the following format: ### Example With Options -``` +```hcl resource "aws_elastic_beanstalk_application" "tftest" { name = "tf-test-name" description = "tf-test-desc" diff --git a/website/source/docs/providers/aws/r/elastic_transcoder_pipeline.html.markdown b/website/source/docs/providers/aws/r/elastic_transcoder_pipeline.html.markdown index be272ddd3..2db02982d 100644 --- a/website/source/docs/providers/aws/r/elastic_transcoder_pipeline.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_transcoder_pipeline.html.markdown @@ -12,7 +12,7 @@ Provides an Elastic Transcoder pipeline resource. ## Example Usage -``` +```hcl resource "aws_elastictranscoder_pipeline" "bar" { input_bucket = "${aws_s3_bucket.input_bucket.bucket}" name = "aws_elastictranscoder_pipeline_tf_test_" diff --git a/website/source/docs/providers/aws/r/elastic_transcoder_preset.html.markdown b/website/source/docs/providers/aws/r/elastic_transcoder_preset.html.markdown index 44b53dcde..930dc8fce 100644 --- a/website/source/docs/providers/aws/r/elastic_transcoder_preset.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_transcoder_preset.html.markdown @@ -12,7 +12,7 @@ Provides an Elastic Transcoder preset resource. ## Example Usage -``` +```hcl resource "aws_elastictranscoder_preset" "bar" { container = "mp4" description = "Sample Preset" diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown index 31b31346e..d9c5637a0 100644 --- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown @@ -23,7 +23,7 @@ brief downtime as the server reboots. 
See the AWS Docs on ## Example Usage -``` +```hcl resource "aws_elasticache_cluster" "bar" { cluster_id = "cluster-example" engine = "memcached" diff --git a/website/source/docs/providers/aws/r/elasticache_parameter_group.html.markdown b/website/source/docs/providers/aws/r/elasticache_parameter_group.html.markdown index 9b81b525f..b322aaa12 100644 --- a/website/source/docs/providers/aws/r/elasticache_parameter_group.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_parameter_group.html.markdown @@ -10,7 +10,7 @@ Provides an ElastiCache parameter group resource. ## Example Usage -``` +```hcl resource "aws_elasticache_parameter_group" "default" { name = "cache-params" family = "redis2.8" diff --git a/website/source/docs/providers/aws/r/elasticache_replication_group.html.markdown b/website/source/docs/providers/aws/r/elasticache_replication_group.html.markdown index bf35205e8..a14d4e855 100644 --- a/website/source/docs/providers/aws/r/elasticache_replication_group.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_replication_group.html.markdown @@ -12,7 +12,7 @@ Provides an ElastiCache Replication Group resource. ## Example Usage -``` +```hcl resource "aws_elasticache_replication_group" "bar" { replication_group_id = "tf-rep-group-1" replication_group_description = "test description" diff --git a/website/source/docs/providers/aws/r/elasticache_security_group.html.markdown b/website/source/docs/providers/aws/r/elasticache_security_group.html.markdown index dbd7e4176..08b0c2d2d 100644 --- a/website/source/docs/providers/aws/r/elasticache_security_group.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_security_group.html.markdown @@ -17,7 +17,7 @@ ElastiCache cluster **outside** of a VPC. If you are using a VPC, see the ## Example Usage -``` +```hcl resource "aws_security_group" "bar" { name = "security-group" } diff --git a/website/source/docs/providers/aws/r/elasticache_subnet_group.html.markdown b/website/source/docs/providers/aws/r/elasticache_subnet_group.html.markdown index c083ddeb4..dbcc85980 100644 --- a/website/source/docs/providers/aws/r/elasticache_subnet_group.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_subnet_group.html.markdown @@ -16,7 +16,7 @@ ElastiCache cluster **inside** of a VPC. 
If you are on EC2 Classic, see the ## Example Usage -``` +```hcl resource "aws_vpc" "foo" { cidr_block = "10.0.0.0/16" diff --git a/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown b/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown index cf18732e7..5ad289022 100644 --- a/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown +++ b/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown @@ -11,7 +11,7 @@ description: |- ## Example Usage -``` +```hcl resource "aws_elasticsearch_domain" "es" { domain_name = "tf-test" elasticsearch_version = "1.5" diff --git a/website/source/docs/providers/aws/r/elasticsearch_domain_policy.html.markdown b/website/source/docs/providers/aws/r/elasticsearch_domain_policy.html.markdown index 1bc6a71ab..9925d49c2 100644 --- a/website/source/docs/providers/aws/r/elasticsearch_domain_policy.html.markdown +++ b/website/source/docs/providers/aws/r/elasticsearch_domain_policy.html.markdown @@ -12,7 +12,7 @@ Allows setting policy to an ElasticSearch domain while referencing domain attrib ## Example Usage -``` +```hcl resource "aws_elasticsearch_domain" "example" { domain_name = "tf-test" elasticsearch_version = "2.3" diff --git a/website/source/docs/providers/aws/r/elb.html.markdown b/website/source/docs/providers/aws/r/elb.html.markdown index f682817d0..06b1605d8 100644 --- a/website/source/docs/providers/aws/r/elb.html.markdown +++ b/website/source/docs/providers/aws/r/elb.html.markdown @@ -21,7 +21,7 @@ conflict and will overwrite attachments. ## Example Usage -``` +```hcl # Create a new load balancer resource "aws_elb" "bar" { name = "foobar-terraform-elb" diff --git a/website/source/docs/providers/aws/r/elb_attachment.html.markdown b/website/source/docs/providers/aws/r/elb_attachment.html.markdown index 23e68ed20..6ac433b31 100644 --- a/website/source/docs/providers/aws/r/elb_attachment.html.markdown +++ b/website/source/docs/providers/aws/r/elb_attachment.html.markdown @@ -18,7 +18,7 @@ instances in conjunction with an ELB Attachment resource. Doing so will cause a conflict and will overwrite attachments. ## Example Usage -``` +```hcl # Create a new load balancer attachment resource "aws_elb_attachment" "baz" { elb = "${aws_elb.bar.id}" diff --git a/website/source/docs/providers/aws/r/emr_cluster.html.md b/website/source/docs/providers/aws/r/emr_cluster.html.md index e2c19172d..cca374e58 100644 --- a/website/source/docs/providers/aws/r/emr_cluster.html.md +++ b/website/source/docs/providers/aws/r/emr_cluster.html.md @@ -14,7 +14,7 @@ for more information. ## Example Usage -``` +```hcl resource "aws_emr_cluster" "emr-test-cluster" { name = "emr-test-arn" release_label = "emr-4.6.0" diff --git a/website/source/docs/providers/aws/r/emr_instance_group.html.md b/website/source/docs/providers/aws/r/emr_instance_group.html.md index a98c79f02..a5814af27 100644 --- a/website/source/docs/providers/aws/r/emr_instance_group.html.md +++ b/website/source/docs/providers/aws/r/emr_instance_group.html.md @@ -17,7 +17,7 @@ Terraform will resize any Instance Group to zero when destroying the resource. 
## Example Usage -``` +```hcl resource "aws_emr_instance_group" "task" { cluster_id = "${aws_emr_cluster.tf-test-cluster.id}" instance_count = 1 diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown index b1ff37b1a..142e40323 100644 --- a/website/source/docs/providers/aws/r/glacier_vault.html.markdown +++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown @@ -14,7 +14,7 @@ Provides a Glacier Vault Resource. You can refer to the [Glacier Developer Guide ## Example Usage -``` +```hcl resource "aws_sns_topic" "aws_sns_topic" { name = "glacier-sns-topic" } diff --git a/website/source/docs/providers/aws/r/iam_access_key.html.markdown b/website/source/docs/providers/aws/r/iam_access_key.html.markdown index f66b35c71..f778bd1b5 100644 --- a/website/source/docs/providers/aws/r/iam_access_key.html.markdown +++ b/website/source/docs/providers/aws/r/iam_access_key.html.markdown @@ -12,7 +12,7 @@ Provides an IAM access key. This is a set of credentials that allow API requests ## Example Usage -``` +```hcl resource "aws_iam_access_key" "lb" { user = "${aws_iam_user.lb.name}" pgp_key = "keybase:some_person_that_exists" diff --git a/website/source/docs/providers/aws/r/iam_account_alias.html.markdown b/website/source/docs/providers/aws/r/iam_account_alias.html.markdown index 7acd08834..67365a541 100644 --- a/website/source/docs/providers/aws/r/iam_account_alias.html.markdown +++ b/website/source/docs/providers/aws/r/iam_account_alias.html.markdown @@ -14,7 +14,7 @@ Manages the account alias for the AWS Account. ## Example Usage -``` +```hcl resource "aws_iam_account_alias" "alias" { account_alias = "my-account-alias" } diff --git a/website/source/docs/providers/aws/r/iam_account_password_policy.html.markdown b/website/source/docs/providers/aws/r/iam_account_password_policy.html.markdown index 2957c0245..3cb4631dd 100644 --- a/website/source/docs/providers/aws/r/iam_account_password_policy.html.markdown +++ b/website/source/docs/providers/aws/r/iam_account_password_policy.html.markdown @@ -16,7 +16,7 @@ in the official AWS docs. ## Example Usage -``` +```hcl resource "aws_iam_account_password_policy" "strict" { minimum_password_length = 8 require_lowercase_characters = true diff --git a/website/source/docs/providers/aws/r/iam_group.html.markdown b/website/source/docs/providers/aws/r/iam_group.html.markdown index 82c884905..da1486d24 100644 --- a/website/source/docs/providers/aws/r/iam_group.html.markdown +++ b/website/source/docs/providers/aws/r/iam_group.html.markdown @@ -12,7 +12,7 @@ Provides an IAM group. 
## Example Usage -``` +```hcl resource "aws_iam_group" "developers" { name = "developers" path = "/users/" diff --git a/website/source/docs/providers/aws/r/iam_group_membership.html.markdown b/website/source/docs/providers/aws/r/iam_group_membership.html.markdown index a9f1d95e7..eb929b82e 100644 --- a/website/source/docs/providers/aws/r/iam_group_membership.html.markdown +++ b/website/source/docs/providers/aws/r/iam_group_membership.html.markdown @@ -14,7 +14,7 @@ more information on managing IAM Groups or IAM Users, see [IAM Groups][1] or ## Example Usage -``` +```hcl resource "aws_iam_group_membership" "team" { name = "tf-testing-group-membership" diff --git a/website/source/docs/providers/aws/r/iam_group_policy.html.markdown b/website/source/docs/providers/aws/r/iam_group_policy.html.markdown index 09b6f963b..4bf62e931 100644 --- a/website/source/docs/providers/aws/r/iam_group_policy.html.markdown +++ b/website/source/docs/providers/aws/r/iam_group_policy.html.markdown @@ -12,7 +12,7 @@ Provides an IAM policy attached to a group. ## Example Usage -``` +```hcl resource "aws_iam_group_policy" "my_developer_policy" { name = "my_developer_policy" group = "${aws_iam_group.my_developers.id}" diff --git a/website/source/docs/providers/aws/r/iam_group_policy_attachment.markdown b/website/source/docs/providers/aws/r/iam_group_policy_attachment.markdown index eb3822dfc..4291269d0 100644 --- a/website/source/docs/providers/aws/r/iam_group_policy_attachment.markdown +++ b/website/source/docs/providers/aws/r/iam_group_policy_attachment.markdown @@ -10,7 +10,7 @@ description: |- Attaches a Managed IAM Policy to an IAM group -``` +```hcl resource "aws_iam_group" "group" { name = "test-group" } diff --git a/website/source/docs/providers/aws/r/iam_instance_profile.html.markdown b/website/source/docs/providers/aws/r/iam_instance_profile.html.markdown index 56182dfdf..24b522b32 100644 --- a/website/source/docs/providers/aws/r/iam_instance_profile.html.markdown +++ b/website/source/docs/providers/aws/r/iam_instance_profile.html.markdown @@ -14,7 +14,7 @@ Provides an IAM instance profile. ## Example Usage -``` +```hcl resource "aws_iam_instance_profile" "test_profile" { name = "test_profile" role = "${aws_iam_role.role.name}" diff --git a/website/source/docs/providers/aws/r/iam_openid_connect_provider.html.markdown b/website/source/docs/providers/aws/r/iam_openid_connect_provider.html.markdown index 2e312e5b9..bdd49c4ef 100644 --- a/website/source/docs/providers/aws/r/iam_openid_connect_provider.html.markdown +++ b/website/source/docs/providers/aws/r/iam_openid_connect_provider.html.markdown @@ -12,7 +12,7 @@ Provides an IAM OpenID Connect provider. ## Example Usage -``` +```hcl resource "aws_iam_openid_connect_provider" "default" { url = "https://accounts.google.com" client_id_list = [ diff --git a/website/source/docs/providers/aws/r/iam_policy.html.markdown b/website/source/docs/providers/aws/r/iam_policy.html.markdown index 3f2fdc7a5..e82c4b6f4 100644 --- a/website/source/docs/providers/aws/r/iam_policy.html.markdown +++ b/website/source/docs/providers/aws/r/iam_policy.html.markdown @@ -10,7 +10,7 @@ description: |- Provides an IAM policy. 
-``` +```hcl resource "aws_iam_policy" "policy" { name = "test_policy" path = "/" diff --git a/website/source/docs/providers/aws/r/iam_policy_attachment.html.markdown b/website/source/docs/providers/aws/r/iam_policy_attachment.html.markdown index f4034dd6c..86fc9ea49 100644 --- a/website/source/docs/providers/aws/r/iam_policy_attachment.html.markdown +++ b/website/source/docs/providers/aws/r/iam_policy_attachment.html.markdown @@ -12,7 +12,7 @@ Attaches a Managed IAM Policy to user(s), role(s), and/or group(s) ~> **NOTE:** The aws_iam_policy_attachment resource is only meant to be used once for each managed policy. All of the users/roles/groups that a single policy is being attached to should be declared by a single aws_iam_policy_attachment resource. -``` +```hcl resource "aws_iam_user" "user" { name = "test-user" } diff --git a/website/source/docs/providers/aws/r/iam_role.html.markdown b/website/source/docs/providers/aws/r/iam_role.html.markdown index 7abee66b0..ecba4be52 100644 --- a/website/source/docs/providers/aws/r/iam_role.html.markdown +++ b/website/source/docs/providers/aws/r/iam_role.html.markdown @@ -12,7 +12,7 @@ Provides an IAM role. ## Example Usage -``` +```hcl resource "aws_iam_role" "test_role" { name = "test_role" @@ -58,7 +58,7 @@ The following attributes are exported: ## Example of Using Data Source for Assume Role Policy -``` +```hcl data "aws_iam_policy_document" "instance-assume-role-policy" { statement { actions = ["sts:AssumeRole"] diff --git a/website/source/docs/providers/aws/r/iam_role_policy.html.markdown b/website/source/docs/providers/aws/r/iam_role_policy.html.markdown index c05db5e51..540ee0105 100644 --- a/website/source/docs/providers/aws/r/iam_role_policy.html.markdown +++ b/website/source/docs/providers/aws/r/iam_role_policy.html.markdown @@ -12,7 +12,7 @@ Provides an IAM role policy. ## Example Usage -``` +```hcl resource "aws_iam_role_policy" "test_policy" { name = "test_policy" role = "${aws_iam_role.test_role.id}" diff --git a/website/source/docs/providers/aws/r/iam_role_policy_attachment.markdown b/website/source/docs/providers/aws/r/iam_role_policy_attachment.markdown index 64cb9c556..f3d826d81 100644 --- a/website/source/docs/providers/aws/r/iam_role_policy_attachment.markdown +++ b/website/source/docs/providers/aws/r/iam_role_policy_attachment.markdown @@ -10,7 +10,7 @@ description: |- Attaches a Managed IAM Policy to an IAM role -``` +```hcl resource "aws_iam_role" "role" { name = "test-role" } diff --git a/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown index 97500bfd1..05335aed6 100644 --- a/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown +++ b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown @@ -12,7 +12,7 @@ Provides an IAM SAML provider. ## Example Usage -``` +```hcl resource "aws_iam_saml_provider" "default" { name = "myprovider" saml_metadata_document = "${file("saml-metadata.xml")}" diff --git a/website/source/docs/providers/aws/r/iam_server_certificate.html.markdown b/website/source/docs/providers/aws/r/iam_server_certificate.html.markdown index 15effdc1e..e49f9623b 100644 --- a/website/source/docs/providers/aws/r/iam_server_certificate.html.markdown +++ b/website/source/docs/providers/aws/r/iam_server_certificate.html.markdown @@ -26,7 +26,7 @@ Certificates][2] in AWS Documentation. 
**Using certs on file:** -``` +```hcl resource "aws_iam_server_certificate" "test_cert" { name = "some_test_cert" certificate_body = "${file("self-ca-cert.pem")}" @@ -36,7 +36,7 @@ resource "aws_iam_server_certificate" "test_cert" { **Example with cert in-line:** -``` +```hcl resource "aws_iam_server_certificate" "test_cert_alt" { name = "alt_test_cert" @@ -63,8 +63,7 @@ recommended you utilize the `name_prefix` attribute and enable the to create a new, updated `aws_iam_server_certificate` resource and replace it in dependent resources before attempting to destroy the old version. - -``` +```hcl resource "aws_iam_server_certificate" "test_cert" { name_prefix = "example-cert" certificate_body = "${file("self-ca-cert.pem")}" diff --git a/website/source/docs/providers/aws/r/iam_user.html.markdown b/website/source/docs/providers/aws/r/iam_user.html.markdown index 4a7bd77db..2966c4b5f 100644 --- a/website/source/docs/providers/aws/r/iam_user.html.markdown +++ b/website/source/docs/providers/aws/r/iam_user.html.markdown @@ -12,7 +12,7 @@ Provides an IAM user. ## Example Usage -``` +```hcl resource "aws_iam_user" "lb" { name = "loadbalancer" path = "/system/" diff --git a/website/source/docs/providers/aws/r/iam_user_login_profile.html.markdown b/website/source/docs/providers/aws/r/iam_user_login_profile.html.markdown index 4b5cef625..6c52caf63 100644 --- a/website/source/docs/providers/aws/r/iam_user_login_profile.html.markdown +++ b/website/source/docs/providers/aws/r/iam_user_login_profile.html.markdown @@ -14,7 +14,7 @@ obtained from Keybase. ## Example Usage -``` +```hcl resource "aws_iam_user" "u" { name = "auser" path = "/" diff --git a/website/source/docs/providers/aws/r/iam_user_policy.html.markdown b/website/source/docs/providers/aws/r/iam_user_policy.html.markdown index 4f3caaead..4d32daeae 100644 --- a/website/source/docs/providers/aws/r/iam_user_policy.html.markdown +++ b/website/source/docs/providers/aws/r/iam_user_policy.html.markdown @@ -12,7 +12,7 @@ Provides an IAM policy attached to a user. ## Example Usage -``` +```hcl resource "aws_iam_user_policy" "lb_ro" { name = "test" user = "${aws_iam_user.lb.name}" diff --git a/website/source/docs/providers/aws/r/iam_user_policy_attachment.markdown b/website/source/docs/providers/aws/r/iam_user_policy_attachment.markdown index 2415cb832..5e80e898f 100644 --- a/website/source/docs/providers/aws/r/iam_user_policy_attachment.markdown +++ b/website/source/docs/providers/aws/r/iam_user_policy_attachment.markdown @@ -10,7 +10,7 @@ description: |- Attaches a Managed IAM Policy to an IAM user -``` +```hcl resource "aws_iam_user" "user" { name = "test-user" } diff --git a/website/source/docs/providers/aws/r/iam_user_ssh_key.html.markdown b/website/source/docs/providers/aws/r/iam_user_ssh_key.html.markdown index 1e5b4f950..d81a650cc 100644 --- a/website/source/docs/providers/aws/r/iam_user_ssh_key.html.markdown +++ b/website/source/docs/providers/aws/r/iam_user_ssh_key.html.markdown @@ -12,7 +12,7 @@ Uploads an SSH public key and associates it with the specified IAM user.
## Example Usage -``` +```hcl resource "aws_iam_user" "user" { name = "test-user" path = "/" diff --git a/website/source/docs/providers/aws/r/inspector_assessment_target.html.markdown b/website/source/docs/providers/aws/r/inspector_assessment_target.html.markdown index e2e5f343e..5072affa7 100644 --- a/website/source/docs/providers/aws/r/inspector_assessment_target.html.markdown +++ b/website/source/docs/providers/aws/r/inspector_assessment_target.html.markdown @@ -12,7 +12,7 @@ Provides an Inspector assessment target ## Example Usage -``` +```hcl resource "aws_inspector_resource_group" "bar" { tags { Name = "foo" diff --git a/website/source/docs/providers/aws/r/inspector_assessment_template.html.markdown b/website/source/docs/providers/aws/r/inspector_assessment_template.html.markdown index f906e1628..255f17965 100644 --- a/website/source/docs/providers/aws/r/inspector_assessment_template.html.markdown +++ b/website/source/docs/providers/aws/r/inspector_assessment_template.html.markdown @@ -12,7 +12,7 @@ Provides an Inspector assessment template ## Example Usage -``` +```hcl resource "aws_inspector_assessment_template" "foo" { name = "bar template" target_arn = "${aws_inspector_assessment_target.foo.arn}" diff --git a/website/source/docs/providers/aws/r/inspector_resource_group.html.markdown b/website/source/docs/providers/aws/r/inspector_resource_group.html.markdown index 362754d59..d6bc5c96f 100644 --- a/website/source/docs/providers/aws/r/inspector_resource_group.html.markdown +++ b/website/source/docs/providers/aws/r/inspector_resource_group.html.markdown @@ -12,7 +12,7 @@ Provides an Inspector resource group ## Example Usage -``` +```hcl resource "aws_inspector_resource_group" "bar" { tags { Name = "foo" diff --git a/website/source/docs/providers/aws/r/instance.html.markdown b/website/source/docs/providers/aws/r/instance.html.markdown index 33ebdc549..4d8725de2 100644 --- a/website/source/docs/providers/aws/r/instance.html.markdown +++ b/website/source/docs/providers/aws/r/instance.html.markdown @@ -13,7 +13,7 @@ and deleted. Instances also support [provisioning](/docs/provisioners/index.html ## Example Usage -``` +```hcl # Create a new instance of the latest Ubuntu 14.04 on a # t2.micro node with an AWS Tag naming it "HelloWorld" provider "aws" { diff --git a/website/source/docs/providers/aws/r/internet_gateway.html.markdown b/website/source/docs/providers/aws/r/internet_gateway.html.markdown index 5b2a29bf9..339a894e6 100644 --- a/website/source/docs/providers/aws/r/internet_gateway.html.markdown +++ b/website/source/docs/providers/aws/r/internet_gateway.html.markdown @@ -12,7 +12,7 @@ Provides a resource to create a VPC Internet Gateway.
## Example Usage -``` +```hcl resource "aws_internet_gateway" "gw" { vpc_id = "${aws_vpc.main.id}" diff --git a/website/source/docs/providers/aws/r/key_pair.html.markdown b/website/source/docs/providers/aws/r/key_pair.html.markdown index acb49999e..89ad0abfb 100644 --- a/website/source/docs/providers/aws/r/key_pair.html.markdown +++ b/website/source/docs/providers/aws/r/key_pair.html.markdown @@ -20,7 +20,7 @@ When importing an existing key pair the public key material may be in any format ## Example Usage -``` +```hcl resource "aws_key_pair" "deployer" { key_name = "deployer-key" public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 email@example.com" diff --git a/website/source/docs/providers/aws/r/kinesis_firehose_delivery_stream.html.markdown b/website/source/docs/providers/aws/r/kinesis_firehose_delivery_stream.html.markdown index 57dca62e1..c77386d2e 100644 --- a/website/source/docs/providers/aws/r/kinesis_firehose_delivery_stream.html.markdown +++ b/website/source/docs/providers/aws/r/kinesis_firehose_delivery_stream.html.markdown @@ -15,7 +15,8 @@ For more details, see the [Amazon Kinesis Firehose Documentation][1]. ## Example Usage ### S3 Destination -``` + +```hcl resource "aws_s3_bucket" "bucket" { bucket = "tf-test-bucket" acl = "private" @@ -54,7 +55,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" { ### Redshift Destination -``` +```hcl resource "aws_redshift_cluster" "test_cluster" { cluster_identifier = "tf-redshift-cluster-%d" database_name = "test" @@ -90,7 +91,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" { ### Elasticsearch Destination -``` +```hcl resource "aws_elasticsearch_domain" "test_cluster" { domain_name = "firehose-es-test" } @@ -175,8 +176,6 @@ The `cloudwatch_logging_options` object supports the following: * `log_group_name` - (Optional) The CloudWatch group name for logging. This value is required if `enabled` is true. * `log_stream_name` - (Optional) The CloudWatch log stream name for logging. This value is required if `enabled` is true. - - ## Attributes Reference * `arn` - The Amazon Resource Name (ARN) specifying the Stream diff --git a/website/source/docs/providers/aws/r/kinesis_stream.html.markdown b/website/source/docs/providers/aws/r/kinesis_stream.html.markdown index c4e316ab5..90f0d81a0 100644 --- a/website/source/docs/providers/aws/r/kinesis_stream.html.markdown +++ b/website/source/docs/providers/aws/r/kinesis_stream.html.markdown @@ -15,7 +15,7 @@ For more details, see the [Amazon Kinesis Documentation][1]. 
## Example Usage -``` +```hcl resource "aws_kinesis_stream" "test_stream" { name = "terraform-kinesis-test" shard_count = 1 diff --git a/website/source/docs/providers/aws/r/kms_alias.html.markdown b/website/source/docs/providers/aws/r/kms_alias.html.markdown index 368531032..b92bde5bd 100644 --- a/website/source/docs/providers/aws/r/kms_alias.html.markdown +++ b/website/source/docs/providers/aws/r/kms_alias.html.markdown @@ -14,7 +14,7 @@ the [account limits](http://docs.aws.amazon.com/kms/latest/developerguide/limits ## Example Usage -``` +```hcl resource "aws_kms_key" "a" {} resource "aws_kms_alias" "a" { diff --git a/website/source/docs/providers/aws/r/kms_key.html.markdown b/website/source/docs/providers/aws/r/kms_key.html.markdown index 9a494977f..2a7f014e1 100644 --- a/website/source/docs/providers/aws/r/kms_key.html.markdown +++ b/website/source/docs/providers/aws/r/kms_key.html.markdown @@ -12,7 +12,7 @@ Provides a KMS customer master key. ## Example Usage -``` +```hcl resource "aws_kms_key" "a" { description = "KMS key 1" deletion_window_in_days = 10 diff --git a/website/source/docs/providers/aws/r/lambda_alias.html.markdown b/website/source/docs/providers/aws/r/lambda_alias.html.markdown index de057611d..dac242fa0 100644 --- a/website/source/docs/providers/aws/r/lambda_alias.html.markdown +++ b/website/source/docs/providers/aws/r/lambda_alias.html.markdown @@ -15,7 +15,7 @@ For information about function aliases, see [CreateAlias][2] in the API docs. ## Example Usage -``` +```hcl resource "aws_lambda_alias" "test_alias" { name = "testalias" description = "a sample description" diff --git a/website/source/docs/providers/aws/r/lambda_event_source_mapping.html.markdown b/website/source/docs/providers/aws/r/lambda_event_source_mapping.html.markdown index 7fa4b53f2..e9b89bd97 100644 --- a/website/source/docs/providers/aws/r/lambda_event_source_mapping.html.markdown +++ b/website/source/docs/providers/aws/r/lambda_event_source_mapping.html.markdown @@ -15,7 +15,7 @@ For information about event source mappings, see [CreateEventSourceMapping][2] i ## Example Usage -``` +```hcl resource "aws_lambda_event_source_mapping" "event_source_mapping" { batch_size = 100 event_source_arn = "arn:aws:kinesis:REGION:123456789012:stream/stream_name" diff --git a/website/source/docs/providers/aws/r/lambda_function.html.markdown b/website/source/docs/providers/aws/r/lambda_function.html.markdown index bda53ff2a..69e2003ed 100644 --- a/website/source/docs/providers/aws/r/lambda_function.html.markdown +++ b/website/source/docs/providers/aws/r/lambda_function.html.markdown @@ -14,7 +14,7 @@ For information about Lambda and how to use it, see [What is AWS Lambda?][1] ## Example Usage -``` +```hcl resource "aws_iam_role" "iam_for_lambda" { name = "iam_for_lambda" diff --git a/website/source/docs/providers/aws/r/lambda_permission.html.markdown b/website/source/docs/providers/aws/r/lambda_permission.html.markdown index 3360aef2d..9c962ec29 100644 --- a/website/source/docs/providers/aws/r/lambda_permission.html.markdown +++ b/website/source/docs/providers/aws/r/lambda_permission.html.markdown @@ -13,7 +13,7 @@ Creates a Lambda permission to allow external sources invoking the Lambda functi ## Example Usage -``` +```hcl resource "aws_lambda_permission" "allow_cloudwatch" { statement_id = "AllowExecutionFromCloudWatch" action = "lambda:InvokeFunction" @@ -62,7 +62,7 @@ EOF ## Usage with SNS -``` +```hcl resource "aws_lambda_permission" "with_sns" { statement_id = "AllowExecutionFromSNS" action = 
"lambda:InvokeFunction" diff --git a/website/source/docs/providers/aws/r/launch_configuration.html.markdown b/website/source/docs/providers/aws/r/launch_configuration.html.markdown index b5d283c38..10c6df8bd 100644 --- a/website/source/docs/providers/aws/r/launch_configuration.html.markdown +++ b/website/source/docs/providers/aws/r/launch_configuration.html.markdown @@ -12,7 +12,7 @@ Provides a resource to create a new launch configuration, used for autoscaling g ## Example Usage -``` +```hcl data "aws_ami" "ubuntu" { most_recent = true @@ -46,7 +46,7 @@ it's recommended to specify `create_before_destroy` in a [lifecycle][2] block. Either omit the Launch Configuration `name` attribute, or specify a partial name with `name_prefix`. Example: -``` +```hcl data "aws_ami" "ubuntu" { most_recent = true @@ -98,7 +98,7 @@ reserve your instances at this price. See the [AWS Spot Instance documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html) for more information or how to launch [Spot Instances][3] with Terraform. -``` +```hcl data "aws_ami" "ubuntu" { most_recent = true diff --git a/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown b/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown index e6f49169a..08914033f 100644 --- a/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown +++ b/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown @@ -12,7 +12,7 @@ Provides a load balancer cookie stickiness policy, which allows an ELB to contro ## Example Usage -``` +```hcl resource "aws_elb" "lb" { name = "test-lb" availability_zones = ["us-east-1a"] diff --git a/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown b/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown index 14c820f1d..92a02179e 100644 --- a/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown +++ b/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown @@ -12,7 +12,7 @@ Provides a load balancer SSL negotiation policy, which allows an ELB to control ## Example Usage -``` +```hcl resource "aws_elb" "lb" { name = "test-lb" availability_zones = ["us-east-1a"] diff --git a/website/source/docs/providers/aws/r/lightsail_domain.html.markdown b/website/source/docs/providers/aws/r/lightsail_domain.html.markdown index e3cf9a87d..3714aedb5 100644 --- a/website/source/docs/providers/aws/r/lightsail_domain.html.markdown +++ b/website/source/docs/providers/aws/r/lightsail_domain.html.markdown @@ -18,7 +18,7 @@ this parameter to manage the DNS records for that domain. ## Example Usage, creating a new domain -``` +```hcl resource "aws_lightsail_domain" "domain_test" { domain_name = "mydomain.com" } diff --git a/website/source/docs/providers/aws/r/lightsail_instance.html.markdown b/website/source/docs/providers/aws/r/lightsail_instance.html.markdown index f86c75dff..522041e9c 100644 --- a/website/source/docs/providers/aws/r/lightsail_instance.html.markdown +++ b/website/source/docs/providers/aws/r/lightsail_instance.html.markdown @@ -16,7 +16,7 @@ Note: Lightsail is currently only supported in `us-east-1` region. 
## Example Usage -``` +```hcl # Create a new GitLab Lightsail Instance resource "aws_lightsail_instance" "gitlab_test" { name = "custom gitlab" diff --git a/website/source/docs/providers/aws/r/lightsail_key_pair.html.markdown b/website/source/docs/providers/aws/r/lightsail_key_pair.html.markdown index 5ed4be2f2..51b9408b5 100644 --- a/website/source/docs/providers/aws/r/lightsail_key_pair.html.markdown +++ b/website/source/docs/providers/aws/r/lightsail_key_pair.html.markdown @@ -16,7 +16,7 @@ Lightsail. ## Example Usage, creating a new Key Pair -``` +```hcl # Create a new Lightsail Key Pair resource "aws_lightsail_key_pair" "lg_key_pair" { name = "lg_key_pair" @@ -25,7 +25,7 @@ resource "aws_lightsail_key_pair" "lg_key_pair" { ## Create new Key Pair, encrypting the private key with a PGP Key -``` +```hcl resource "aws_lightsail_key_pair" "lg_key_pair" { name = "lg_key_pair" pgp_key = "keybase:keybaseusername" @@ -34,7 +34,7 @@ resource "aws_lightsail_key_pair" "lg_key_pair" { ## Import an existing public key -``` +```hcl resource "aws_lightsail_key_pair" "lg_key_pair" { name = "importing" public_key = "${file("~/.ssh/id_rsa.pub")}" diff --git a/website/source/docs/providers/aws/r/lightsail_static_ip.html.markdown b/website/source/docs/providers/aws/r/lightsail_static_ip.html.markdown index b3617a432..afc8784a8 100644 --- a/website/source/docs/providers/aws/r/lightsail_static_ip.html.markdown +++ b/website/source/docs/providers/aws/r/lightsail_static_ip.html.markdown @@ -14,7 +14,7 @@ Allocates a static IP address. ## Example Usage -``` +```hcl resource "aws_lightsail_static_ip" "test" { name = "example" } diff --git a/website/source/docs/providers/aws/r/lightsail_static_ip_attachment.html.markdown b/website/source/docs/providers/aws/r/lightsail_static_ip_attachment.html.markdown index 063a3cc83..97f9ca7ba 100644 --- a/website/source/docs/providers/aws/r/lightsail_static_ip_attachment.html.markdown +++ b/website/source/docs/providers/aws/r/lightsail_static_ip_attachment.html.markdown @@ -14,7 +14,7 @@ Provides a static IP address attachment - relationship between a Lightsail stati ## Example Usage -``` +```hcl resource "aws_lightsail_static_ip_attachment" "test" { static_ip_name = "${aws_lightsail_static_ip.test.name}" instance_name = "${aws_lightsail_instance.test.name}" diff --git a/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown b/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown index f40cfbd34..2b4a8c3fe 100644 --- a/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown +++ b/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown @@ -13,7 +13,7 @@ Attaches a load balancer policy to an ELB backend server. ## Example Usage -``` +```hcl resource "aws_elb" "wu-tang" { name = "wu-tang" availability_zones = ["us-east-1a"] @@ -65,7 +65,7 @@ resource "aws_load_balancer_backend_server_policy" "wu-tang-backend-auth-policie Where the file `pubkey` in the current directory contains only the _public key_ of the certificate. 
-``` +```shell cat wu-tang-ca.pem | openssl x509 -pubkey -noout | grep -v '\-\-\-\-' | tr -d '\n' > wu-tang-pubkey ``` diff --git a/website/source/docs/providers/aws/r/load_balancer_listener_policy.html.markdown b/website/source/docs/providers/aws/r/load_balancer_listener_policy.html.markdown index ead1331a3..9ef19e262 100644 --- a/website/source/docs/providers/aws/r/load_balancer_listener_policy.html.markdown +++ b/website/source/docs/providers/aws/r/load_balancer_listener_policy.html.markdown @@ -13,7 +13,7 @@ Attaches a load balancer policy to an ELB Listener. ## Example Usage -``` +```hcl resource "aws_elb" "wu-tang" { name = "wu-tang" availability_zones = ["us-east-1a"] diff --git a/website/source/docs/providers/aws/r/load_balancer_policy.html.markdown b/website/source/docs/providers/aws/r/load_balancer_policy.html.markdown index 3dec164ad..178fcfb41 100644 --- a/website/source/docs/providers/aws/r/load_balancer_policy.html.markdown +++ b/website/source/docs/providers/aws/r/load_balancer_policy.html.markdown @@ -12,7 +12,7 @@ Provides a load balancer policy, which can be attached to an ELB listener or bac ## Example Usage -``` +```hcl resource "aws_elb" "wu-tang" { name = "wu-tang" availability_zones = ["us-east-1a"] @@ -89,7 +89,7 @@ resource "aws_load_balancer_listener_policy" "wu-tang-listener-policies-443" { Where the file `pubkey` in the current directory contains only the _public key_ of the certificate. -``` +```shell cat wu-tang-ca.pem | openssl x509 -pubkey -noout | grep -v '\-\-\-\-' | tr -d '\n' > wu-tang-pubkey ``` diff --git a/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown b/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown index de4fb7341..2551ad738 100644 --- a/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown +++ b/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown @@ -12,7 +12,7 @@ Provides a resource for managing the main routing table of a VPC. ## Example Usage -``` +```hcl resource "aws_main_route_table_association" "a" { vpc_id = "${aws_vpc.foo.id}" route_table_id = "${aws_route_table.bar.id}" diff --git a/website/source/docs/providers/aws/r/nat_gateway.html.markdown b/website/source/docs/providers/aws/r/nat_gateway.html.markdown index 708352086..b5a397b25 100644 --- a/website/source/docs/providers/aws/r/nat_gateway.html.markdown +++ b/website/source/docs/providers/aws/r/nat_gateway.html.markdown @@ -12,7 +12,7 @@ Provides a resource to create a VPC NAT Gateway. 
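A NAT Gateway only has a path to the internet once the VPC's Internet Gateway exists, and Terraform cannot infer that ordering because the NAT Gateway configuration never references the Internet Gateway directly. A minimal sketch of stating the dependency explicitly; the `aws_vpc.main`, `aws_subnet.public`, and gateway names are assumptions for illustration:

```hcl
# Sketch only: the VPC, subnet, and gateway names are illustrative assumptions.
resource "aws_internet_gateway" "gw" {
  vpc_id = "${aws_vpc.main.id}"
}

resource "aws_eip" "nat" {
  vpc = true
}

resource "aws_nat_gateway" "gw" {
  allocation_id = "${aws_eip.nat.id}"
  subnet_id     = "${aws_subnet.public.id}"

  # No implicit dependency exists, so state the ordering explicitly.
  depends_on = ["aws_internet_gateway.gw"]
}
```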
## Example Usage -``` +```hcl resource "aws_nat_gateway" "gw" { allocation_id = "${aws_eip.nat.id}" subnet_id = "${aws_subnet.public.id}" diff --git a/website/source/docs/providers/aws/r/network_acl.html.markdown b/website/source/docs/providers/aws/r/network_acl.html.markdown index 28143f0cc..adb73dcc6 100644 --- a/website/source/docs/providers/aws/r/network_acl.html.markdown +++ b/website/source/docs/providers/aws/r/network_acl.html.markdown @@ -13,7 +13,7 @@ to your security groups in order to add an additional layer of security to your ## Example Usage -``` +```hcl resource "aws_network_acl" "main" { vpc_id = "${aws_vpc.main.id}" diff --git a/website/source/docs/providers/aws/r/network_acl_rule.html.markdown b/website/source/docs/providers/aws/r/network_acl_rule.html.markdown index 87912249a..fc9805526 100644 --- a/website/source/docs/providers/aws/r/network_acl_rule.html.markdown +++ b/website/source/docs/providers/aws/r/network_acl_rule.html.markdown @@ -12,7 +12,7 @@ Creates an entry (a rule) in a network ACL with the specified rule number. ## Example Usage -``` +```hcl resource "aws_network_acl" "bar" { vpc_id = "${aws_vpc.foo.id}" } diff --git a/website/source/docs/providers/aws/r/network_interface.markdown b/website/source/docs/providers/aws/r/network_interface.markdown index eccd979c1..d177da470 100644 --- a/website/source/docs/providers/aws/r/network_interface.markdown +++ b/website/source/docs/providers/aws/r/network_interface.markdown @@ -12,7 +12,7 @@ Provides an Elastic network interface (ENI) resource. ## Example Usage -``` +```hcl resource "aws_network_interface" "test" { subnet_id = "${aws_subnet.public_a.id}" private_ips = ["10.0.0.50"] diff --git a/website/source/docs/providers/aws/r/opsworks_application.html.markdown b/website/source/docs/providers/aws/r/opsworks_application.html.markdown index 34a3e0157..cbe437d69 100644 --- a/website/source/docs/providers/aws/r/opsworks_application.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_application.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks application resource. ## Example Usage -``` +```hcl resource "aws_opsworks_application" "foo-app" { name = "foobar application" short_name = "foobar" diff --git a/website/source/docs/providers/aws/r/opsworks_custom_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_custom_layer.html.markdown index 13ccfc62a..81427aed5 100644 --- a/website/source/docs/providers/aws/r/opsworks_custom_layer.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_custom_layer.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks custom layer resource. ## Example Usage -``` +```hcl resource "aws_opsworks_custom_layer" "custlayer" { name = "My Awesome Custom Layer" short_name = "awesome" @@ -66,7 +66,6 @@ The following attributes are exported: * `id` - The id of the layer. - ## Import OpsWorks Custom Layers can be imported using the `id`, e.g. diff --git a/website/source/docs/providers/aws/r/opsworks_ganglia_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_ganglia_layer.html.markdown index 5b43f515a..a3be106ea 100644 --- a/website/source/docs/providers/aws/r/opsworks_ganglia_layer.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_ganglia_layer.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks Ganglia layer resource. 
## Example Usage -``` +```hcl resource "aws_opsworks_ganglia_layer" "monitor" { stack_id = "${aws_opsworks_stack.main.id}" password = "foobarbaz" diff --git a/website/source/docs/providers/aws/r/opsworks_haproxy_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_haproxy_layer.html.markdown index 447522ec1..ca0427b8a 100644 --- a/website/source/docs/providers/aws/r/opsworks_haproxy_layer.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_haproxy_layer.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks haproxy layer resource. ## Example Usage -``` +```hcl resource "aws_opsworks_haproxy_layer" "lb" { stack_id = "${aws_opsworks_stack.main.id}" stats_password = "foobarbaz" diff --git a/website/source/docs/providers/aws/r/opsworks_instance.html.markdown b/website/source/docs/providers/aws/r/opsworks_instance.html.markdown index cebe99dc7..fa06aff52 100644 --- a/website/source/docs/providers/aws/r/opsworks_instance.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_instance.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks instance resource. ## Example Usage -``` +```hcl resource "aws_opsworks_instance" "my-instance" { stack_id = "${aws_opsworks_stack.my-stack.id}" diff --git a/website/source/docs/providers/aws/r/opsworks_java_app_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_java_app_layer.html.markdown index 7a0dd0cd5..25b680175 100644 --- a/website/source/docs/providers/aws/r/opsworks_java_app_layer.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_java_app_layer.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks Java application layer resource. ## Example Usage -``` +```hcl resource "aws_opsworks_java_app_layer" "app" { stack_id = "${aws_opsworks_stack.main.id}" } diff --git a/website/source/docs/providers/aws/r/opsworks_memcached_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_memcached_layer.html.markdown index 6af1bb402..b89172fb5 100644 --- a/website/source/docs/providers/aws/r/opsworks_memcached_layer.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_memcached_layer.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks memcached layer resource. ## Example Usage -``` +```hcl resource "aws_opsworks_memcached_layer" "cache" { stack_id = "${aws_opsworks_stack.main.id}" } diff --git a/website/source/docs/providers/aws/r/opsworks_mysql_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_mysql_layer.html.markdown index 6032083ef..df9bad717 100644 --- a/website/source/docs/providers/aws/r/opsworks_mysql_layer.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_mysql_layer.html.markdown @@ -15,7 +15,7 @@ Provides an OpsWorks MySQL layer resource. ## Example Usage -``` +```hcl resource "aws_opsworks_mysql_layer" "db" { stack_id = "${aws_opsworks_stack.main.id}" } diff --git a/website/source/docs/providers/aws/r/opsworks_nodejs_app_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_nodejs_app_layer.html.markdown index 94cba6a3b..f5e52ac78 100644 --- a/website/source/docs/providers/aws/r/opsworks_nodejs_app_layer.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_nodejs_app_layer.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks NodeJS application layer resource. 
## Example Usage -``` +```hcl resource "aws_opsworks_nodejs_app_layer" "app" { stack_id = "${aws_opsworks_stack.main.id}" } diff --git a/website/source/docs/providers/aws/r/opsworks_permission.html.markdown b/website/source/docs/providers/aws/r/opsworks_permission.html.markdown index 2c45c22d1..408e2538f 100644 --- a/website/source/docs/providers/aws/r/opsworks_permission.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_permission.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks permission resource. ## Example Usage -``` +```hcl resource "aws_opsworks_permission" "my_stack_permission" { allow_ssh = true allow_sudo = true diff --git a/website/source/docs/providers/aws/r/opsworks_php_app_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_php_app_layer.html.markdown index 4af6cfb6b..963684f87 100644 --- a/website/source/docs/providers/aws/r/opsworks_php_app_layer.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_php_app_layer.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks PHP application layer resource. ## Example Usage -``` +```hcl resource "aws_opsworks_php_app_layer" "app" { stack_id = "${aws_opsworks_stack.main.id}" } diff --git a/website/source/docs/providers/aws/r/opsworks_rails_app_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_rails_app_layer.html.markdown index 82a945ccd..84cd69d6c 100644 --- a/website/source/docs/providers/aws/r/opsworks_rails_app_layer.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_rails_app_layer.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks Ruby on Rails application layer resource. ## Example Usage -``` +```hcl resource "aws_opsworks_rails_app_layer" "app" { stack_id = "${aws_opsworks_stack.main.id}" } diff --git a/website/source/docs/providers/aws/r/opsworks_rds_db_instance.html.markdown b/website/source/docs/providers/aws/r/opsworks_rds_db_instance.html.markdown index 542dd956f..496d253c0 100644 --- a/website/source/docs/providers/aws/r/opsworks_rds_db_instance.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_rds_db_instance.html.markdown @@ -15,7 +15,7 @@ Provides an OpsWorks RDS DB Instance resource. ## Example Usage -``` +```hcl resource "aws_opsworks_rds_db_instance" "my_instance" { stack_id = "${aws_opsworks_stack.my_stack.id}" rds_db_instance_arn = "${aws_db_instance.my_instance.arn}" diff --git a/website/source/docs/providers/aws/r/opsworks_stack.html.markdown b/website/source/docs/providers/aws/r/opsworks_stack.html.markdown index 1e37b05ec..f9cceee7f 100644 --- a/website/source/docs/providers/aws/r/opsworks_stack.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_stack.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks stack resource. ## Example Usage -``` +```hcl resource "aws_opsworks_stack" "main" { name = "awesome-stack" region = "us-west-1" @@ -78,7 +78,6 @@ The following attributes are exported: * `id` - The id of the stack. - ## Import OpsWorks stacks can be imported using the `id`, e.g. diff --git a/website/source/docs/providers/aws/r/opsworks_static_web_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_static_web_layer.html.markdown index 930af7b18..967f45fa3 100644 --- a/website/source/docs/providers/aws/r/opsworks_static_web_layer.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_static_web_layer.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks static web server layer resource. 
## Example Usage -``` +```hcl resource "aws_opsworks_static_web_layer" "web" { stack_id = "${aws_opsworks_stack.main.id}" } diff --git a/website/source/docs/providers/aws/r/opsworks_user_profile.html.markdown b/website/source/docs/providers/aws/r/opsworks_user_profile.html.markdown index e89541b26..b1f26c70f 100644 --- a/website/source/docs/providers/aws/r/opsworks_user_profile.html.markdown +++ b/website/source/docs/providers/aws/r/opsworks_user_profile.html.markdown @@ -12,7 +12,7 @@ Provides an OpsWorks User Profile resource. ## Example Usage -``` +```hcl resource "aws_opsworks_user_profile" "my_profile" { user_arn = "${aws_iam_user.user.arn}" ssh_username = "my_user" diff --git a/website/source/docs/providers/aws/r/placement_group.html.markdown b/website/source/docs/providers/aws/r/placement_group.html.markdown index 584478805..7c5249544 100644 --- a/website/source/docs/providers/aws/r/placement_group.html.markdown +++ b/website/source/docs/providers/aws/r/placement_group.html.markdown @@ -13,7 +13,7 @@ in [AWS Docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-grou ## Example Usage -``` +```hcl resource "aws_placement_group" "web" { name = "hunky-dory-pg" strategy = "cluster" diff --git a/website/source/docs/providers/aws/r/proxy_protocol_policy.html.markdown b/website/source/docs/providers/aws/r/proxy_protocol_policy.html.markdown index 994221089..dbd46ba5b 100644 --- a/website/source/docs/providers/aws/r/proxy_protocol_policy.html.markdown +++ b/website/source/docs/providers/aws/r/proxy_protocol_policy.html.markdown @@ -12,7 +12,7 @@ Provides a proxy protocol policy, which allows an ELB to carry a client connecti ## Example Usage -``` +```hcl resource "aws_elb" "lb" { name = "test-lb" availability_zones = ["us-east-1a"] diff --git a/website/source/docs/providers/aws/r/rds_cluster.html.markdown b/website/source/docs/providers/aws/r/rds_cluster.html.markdown index 6139824f1..d042fcbbb 100644 --- a/website/source/docs/providers/aws/r/rds_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster.html.markdown @@ -31,7 +31,7 @@ for more information. ## Example Usage -``` +```hcl resource "aws_rds_cluster" "default" { cluster_identifier = "aurora-cluster-demo" availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] @@ -111,7 +111,6 @@ load-balanced across replicas * `replication_source_identifier` - ARN of the source DB cluster if this DB cluster is created as a Read Replica. 
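Because the cluster endpoint is exported, other configuration can consume it directly rather than hard-coding a DNS name. A small sketch, reusing the `aws_rds_cluster.default` name from the example above:

```hcl
# Expose the writer endpoint of the cluster defined in the example above.
output "rds_cluster_endpoint" {
  value = "${aws_rds_cluster.default.endpoint}"
}
```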
[1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Replication.html - [2]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html [3]: /docs/providers/aws/r/rds_cluster_instance.html [4]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown index 031972d4b..18c9b3b75 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown @@ -23,7 +23,7 @@ For more information on Amazon Aurora, see [Aurora on Amazon RDS][2] in the Amaz ## Example Usage -``` +```hcl resource "aws_rds_cluster_instance" "cluster_instances" { count = 2 identifier = "aurora-cluster-demo-${count.index}" diff --git a/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown b/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown index c2fa63d78..b52a4dd08 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown @@ -10,7 +10,7 @@ Provides an RDS DB cluster parameter group resource. ## Example Usage -``` +```hcl resource "aws_rds_cluster_parameter_group" "default" { name = "rds-cluster-pg" family = "aurora5.6" diff --git a/website/source/docs/providers/aws/r/redshift_cluster.html.markdown b/website/source/docs/providers/aws/r/redshift_cluster.html.markdown index 99d6ac66e..bcee8cdba 100644 --- a/website/source/docs/providers/aws/r/redshift_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/redshift_cluster.html.markdown @@ -13,7 +13,7 @@ Provides a Redshift Cluster Resource. ## Example Usage -``` +```hcl resource "aws_redshift_cluster" "default" { cluster_identifier = "tf-redshift-cluster" database_name = "mydb" diff --git a/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown b/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown index 559e26f17..65472b540 100644 --- a/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown +++ b/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown @@ -10,7 +10,7 @@ Provides a Redshift Cluster parameter group resource. ## Example Usage -``` +```hcl resource "aws_redshift_parameter_group" "bar" { name = "parameter-group-test-terraform" family = "redshift-1.0" diff --git a/website/source/docs/providers/aws/r/redshift_security_group.html.markdown b/website/source/docs/providers/aws/r/redshift_security_group.html.markdown index 94924d1b8..4d6dc0367 100644 --- a/website/source/docs/providers/aws/r/redshift_security_group.html.markdown +++ b/website/source/docs/providers/aws/r/redshift_security_group.html.markdown @@ -12,7 +12,7 @@ Creates a new Amazon Redshift security group. You use security groups to control ## Example Usage -``` +```hcl resource "aws_redshift_security_group" "default" { name = "redshift-sg" diff --git a/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown b/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown index db454ae08..b5deadfd9 100644 --- a/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown +++ b/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown @@ -12,7 +12,7 @@ Creates a new Amazon Redshift subnet group. 
You must provide a list of one or mo ## Example Usage -``` +```hcl resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" } diff --git a/website/source/docs/providers/aws/r/route.html.markdown b/website/source/docs/providers/aws/r/route.html.markdown index b6589298f..dafe52651 100644 --- a/website/source/docs/providers/aws/r/route.html.markdown +++ b/website/source/docs/providers/aws/r/route.html.markdown @@ -18,7 +18,7 @@ a conflict of rule settings and will overwrite rules. ## Example usage: -``` +```hcl resource "aws_route" "r" { route_table_id = "rtb-4fbb3ac4" destination_cidr_block = "10.0.1.0/22" @@ -29,7 +29,7 @@ resource "aws_route" "r" { ##Example IPv6 Usage: -``` +```hcl resource "aws_vpc" "vpc" { cidr_block = "10.1.0.0/16" assign_generated_ipv6_cidr_block = true diff --git a/website/source/docs/providers/aws/r/route53_delegation_set.html.markdown b/website/source/docs/providers/aws/r/route53_delegation_set.html.markdown index 6efda1363..12f665a5c 100644 --- a/website/source/docs/providers/aws/r/route53_delegation_set.html.markdown +++ b/website/source/docs/providers/aws/r/route53_delegation_set.html.markdown @@ -12,7 +12,7 @@ Provides a [Route53 Delegation Set](https://docs.aws.amazon.com/Route53/latest/A ## Example Usage -``` +```hcl resource "aws_route53_delegation_set" "main" { reference_name = "DynDNS" } diff --git a/website/source/docs/providers/aws/r/route53_health_check.html.markdown b/website/source/docs/providers/aws/r/route53_health_check.html.markdown index 1d9aa73b0..815174759 100644 --- a/website/source/docs/providers/aws/r/route53_health_check.html.markdown +++ b/website/source/docs/providers/aws/r/route53_health_check.html.markdown @@ -11,7 +11,7 @@ Provides a Route53 health check. ## Example Usage -``` +```hcl resource "aws_route53_health_check" "child1" { fqdn = "foobar.terraform.com" port = 80 @@ -38,7 +38,7 @@ resource "aws_route53_health_check" "foo" { ## CloudWatch Alarm Example -``` +```hcl resource "aws_cloudwatch_metric_alarm" "foobar" { alarm_name = "terraform-test-foobar5" comparison_operator = "GreaterThanOrEqualToThreshold" diff --git a/website/source/docs/providers/aws/r/route53_record.html.markdown b/website/source/docs/providers/aws/r/route53_record.html.markdown index 9d5a35db9..ad6a7c725 100644 --- a/website/source/docs/providers/aws/r/route53_record.html.markdown +++ b/website/source/docs/providers/aws/r/route53_record.html.markdown @@ -14,7 +14,7 @@ Provides a Route53 record resource. ### Simple routing policy -``` +```hcl resource "aws_route53_record" "www" { zone_id = "${aws_route53_zone.primary.zone_id}" name = "www.example.com" @@ -27,7 +27,7 @@ resource "aws_route53_record" "www" { ### Weighted routing policy Other routing policies are configured similarly. See [AWS Route53 Developer Guide](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html) for details. -``` +```hcl resource "aws_route53_record" "www-dev" { zone_id = "${aws_route53_zone.primary.zone_id}" name = "www" @@ -64,7 +64,7 @@ to understand differences between alias and non-alias records. TTL for all alias records is [60 seconds](https://aws.amazon.com/route53/faqs/#dns_failover_do_i_need_to_adjust), you cannot change this, therefore `ttl` has to be omitted in alias records. 
-``` +```hcl resource "aws_elb" "main" { name = "foobar-terraform-elb" availability_zones = ["us-east-1c"] diff --git a/website/source/docs/providers/aws/r/route53_zone.html.markdown b/website/source/docs/providers/aws/r/route53_zone.html.markdown index 2e776f654..bab319f2e 100644 --- a/website/source/docs/providers/aws/r/route53_zone.html.markdown +++ b/website/source/docs/providers/aws/r/route53_zone.html.markdown @@ -12,7 +12,7 @@ Provides a Route53 Hosted Zone resource. ## Example Usage -``` +```hcl resource "aws_route53_zone" "primary" { name = "example.com" } @@ -22,7 +22,7 @@ For use in subdomains, note that you need to create a `aws_route53_record` of type `NS` as well as the subdomain zone. -``` +```hcl resource "aws_route53_zone" "main" { name = "example.com" } diff --git a/website/source/docs/providers/aws/r/route53_zone_association.html.markdown b/website/source/docs/providers/aws/r/route53_zone_association.html.markdown index 82efc89a9..9136e3d2a 100644 --- a/website/source/docs/providers/aws/r/route53_zone_association.html.markdown +++ b/website/source/docs/providers/aws/r/route53_zone_association.html.markdown @@ -12,7 +12,7 @@ Provides a Route53 private Hosted Zone to VPC association resource. ## Example Usage -``` +```hcl resource "aws_vpc" "primary" { cidr_block = "10.6.0.0/16" enable_dns_hostnames = true diff --git a/website/source/docs/providers/aws/r/route_table.html.markdown b/website/source/docs/providers/aws/r/route_table.html.markdown index f8e79d6fa..88ddc17ed 100644 --- a/website/source/docs/providers/aws/r/route_table.html.markdown +++ b/website/source/docs/providers/aws/r/route_table.html.markdown @@ -18,7 +18,7 @@ a conflict of rule settings and will overwrite rules. ## Example usage with tags: -``` +```hcl resource "aws_route_table" "r" { vpc_id = "${aws_vpc.default.id}" diff --git a/website/source/docs/providers/aws/r/route_table_association.html.markdown b/website/source/docs/providers/aws/r/route_table_association.html.markdown index 5c95bf7b2..149b6c54e 100644 --- a/website/source/docs/providers/aws/r/route_table_association.html.markdown +++ b/website/source/docs/providers/aws/r/route_table_association.html.markdown @@ -12,7 +12,7 @@ Provides a resource to create an association between a subnet and routing table. ## Example Usage -``` +```hcl resource "aws_route_table_association" "a" { subnet_id = "${aws_subnet.foo.id}" route_table_id = "${aws_route_table.bar.id}" diff --git a/website/source/docs/providers/aws/r/s3_bucket.html.markdown b/website/source/docs/providers/aws/r/s3_bucket.html.markdown index 07bff02c4..f91cebe65 100644 --- a/website/source/docs/providers/aws/r/s3_bucket.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket.html.markdown @@ -14,7 +14,7 @@ Provides a S3 bucket resource. 
### Private Bucket w/ Tags -``` +```hcl resource "aws_s3_bucket" "b" { bucket = "my_tf_test_bucket" acl = "private" @@ -28,7 +28,7 @@ resource "aws_s3_bucket" "b" { ### Static Website Hosting -``` +```hcl resource "aws_s3_bucket" "b" { bucket = "s3-website-test.hashicorp.com" acl = "public-read" @@ -54,7 +54,7 @@ EOF ### Using CORS -``` +```hcl resource "aws_s3_bucket" "b" { bucket = "s3-website-test.hashicorp.com" acl = "public-read" @@ -71,7 +71,7 @@ resource "aws_s3_bucket" "b" { ### Using versioning -``` +```hcl resource "aws_s3_bucket" "b" { bucket = "my_tf_test_bucket" acl = "private" @@ -84,7 +84,7 @@ resource "aws_s3_bucket" "b" { ### Enable Logging -``` +```hcl resource "aws_s3_bucket" "log_bucket" { bucket = "my_tf_log_bucket" acl = "log-delivery-write" @@ -103,7 +103,7 @@ resource "aws_s3_bucket" "b" { ### Using object lifecycle -``` +```hcl resource "aws_s3_bucket" "bucket" { bucket = "my-bucket" acl = "private" @@ -170,7 +170,7 @@ resource "aws_s3_bucket" "versioning_bucket" { ### Using replication configuration -``` +```hcl provider "aws" { alias = "west" region = "eu-west-1" diff --git a/website/source/docs/providers/aws/r/s3_bucket_notification.html.markdown b/website/source/docs/providers/aws/r/s3_bucket_notification.html.markdown index 3198f5602..a3c4826b2 100644 --- a/website/source/docs/providers/aws/r/s3_bucket_notification.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket_notification.html.markdown @@ -14,7 +14,7 @@ Provides a S3 bucket notification resource. ### Add notification configuration to SNS Topic -``` +```hcl resource "aws_sns_topic" "topic" { name = "s3-event-notification-topic" @@ -51,7 +51,7 @@ resource "aws_s3_bucket_notification" "bucket_notification" { ### Add notification configuration to SQS Queue -``` +```hcl resource "aws_sqs_queue" "queue" { name = "s3-event-notification-queue" @@ -90,7 +90,7 @@ resource "aws_s3_bucket_notification" "bucket_notification" { ### Add notification configuration to Lambda Function -``` +```hcl resource "aws_iam_role" "iam_for_lambda" { name = "iam_for_lambda" @@ -143,7 +143,7 @@ resource "aws_s3_bucket_notification" "bucket_notification" { ### Trigger multiple Lambda functions -``` +```hcl resource "aws_iam_role" "iam_for_lambda" { name = "iam_for_lambda" @@ -218,7 +218,7 @@ resource "aws_s3_bucket_notification" "bucket_notification" { ### Add multiple notification configurations to SQS Queue -``` +```hcl resource "aws_sqs_queue" "queue" { name = "s3-event-notification-queue" @@ -265,7 +265,7 @@ resource "aws_s3_bucket_notification" "bucket_notification" { For Terraform's [JSON syntax](https://www.terraform.io/docs/configuration/syntax.html), use an array instead of defining the `queue` key twice. -``` +```json { "bucket": "${aws_s3_bucket.bucket.id}", "queue": [ diff --git a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown index 322faa943..1432668ea 100644 --- a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown @@ -14,7 +14,7 @@ Provides a S3 bucket object resource. 
### Uploading a file to a bucket -``` +```hcl resource "aws_s3_bucket_object" "object" { bucket = "your_bucket_name" key = "new_object_key" @@ -25,7 +25,7 @@ resource "aws_s3_bucket_object" "object" { ### Encrypting with KMS Key -``` +```hcl resource "aws_kms_key" "examplekms" { description = "KMS key 1" deletion_window_in_days = 7 @@ -46,7 +46,7 @@ resource "aws_s3_bucket_object" "examplebucket_object" { ### Server Side Encryption with S3 Default Master Key -``` +```hcl resource "aws_s3_bucket" "examplebucket" { bucket = "examplebuckettftest" acl = "private" diff --git a/website/source/docs/providers/aws/r/s3_bucket_policy.html.markdown b/website/source/docs/providers/aws/r/s3_bucket_policy.html.markdown index b45fadeaf..250ac0439 100644 --- a/website/source/docs/providers/aws/r/s3_bucket_policy.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket_policy.html.markdown @@ -14,7 +14,7 @@ Attaches a policy to an S3 bucket resource. ### Using versioning -``` +```hcl resource "aws_s3_bucket" "b" { # Arguments } diff --git a/website/source/docs/providers/aws/r/security_group.html.markdown b/website/source/docs/providers/aws/r/security_group.html.markdown index 7404de588..3f8597372 100644 --- a/website/source/docs/providers/aws/r/security_group.html.markdown +++ b/website/source/docs/providers/aws/r/security_group.html.markdown @@ -21,7 +21,7 @@ a conflict of rule settings and will overwrite rules. Basic usage -``` +```hcl resource "aws_security_group" "allow_all" { name = "allow_all" description = "Allow all inbound traffic" diff --git a/website/source/docs/providers/aws/r/security_group_rule.html.markdown b/website/source/docs/providers/aws/r/security_group_rule.html.markdown index 56deb5b47..bdf70c4b6 100644 --- a/website/source/docs/providers/aws/r/security_group_rule.html.markdown +++ b/website/source/docs/providers/aws/r/security_group_rule.html.markdown @@ -22,7 +22,7 @@ a conflict of rule settings and will overwrite rules. Basic usage -``` +```hcl resource "aws_security_group_rule" "allow_all" { type = "ingress" from_port = 0 @@ -60,7 +60,7 @@ Prefix list IDs are manged by AWS internally. Prefix list IDs are associated with a prefix list name, or service name, that is linked to a specific region. 
Prefix list IDs are exported on VPC Endpoints, so you can use this format: -``` +```hcl resource "aws_security_group_rule" "allow_all" { type = "egress" to_port = 0 diff --git a/website/source/docs/providers/aws/r/ses_active_receipt_rule_set.html.markdown b/website/source/docs/providers/aws/r/ses_active_receipt_rule_set.html.markdown index 49ce6dded..0a581443a 100644 --- a/website/source/docs/providers/aws/r/ses_active_receipt_rule_set.html.markdown +++ b/website/source/docs/providers/aws/r/ses_active_receipt_rule_set.html.markdown @@ -12,7 +12,7 @@ Provides a resource to designate the active SES receipt rule set ## Example Usage -``` +```hcl resource "aws_ses_active_receipt_rule_set" "main" { rule_set_name = "primary-rules" } diff --git a/website/source/docs/providers/aws/r/ses_configuration_set.markdown b/website/source/docs/providers/aws/r/ses_configuration_set.markdown index 7c289aed1..9ea3b374e 100644 --- a/website/source/docs/providers/aws/r/ses_configuration_set.markdown +++ b/website/source/docs/providers/aws/r/ses_configuration_set.markdown @@ -12,7 +12,7 @@ Provides an SES configuration set resource ## Example Usage -``` +```hcl resource "aws_ses_configuration_set" "test" { name = "some-configuration-set-test" } diff --git a/website/source/docs/providers/aws/r/ses_domain_identity.html.markdown b/website/source/docs/providers/aws/r/ses_domain_identity.html.markdown index e005160bf..fc8dfc174 100644 --- a/website/source/docs/providers/aws/r/ses_domain_identity.html.markdown +++ b/website/source/docs/providers/aws/r/ses_domain_identity.html.markdown @@ -30,7 +30,7 @@ The following attributes are exported: ## Example Usage -``` +```hcl resource "aws_ses_domain_identity" "example" { domain = "example.com" } diff --git a/website/source/docs/providers/aws/r/ses_event_destination.markdown b/website/source/docs/providers/aws/r/ses_event_destination.markdown index 5bb004f9e..794953bc5 100644 --- a/website/source/docs/providers/aws/r/ses_event_destination.markdown +++ b/website/source/docs/providers/aws/r/ses_event_destination.markdown @@ -12,7 +12,7 @@ Provides an SES event destination ## Example Usage -``` +```hcl # Add a firehose event destination to a configuration set resource "aws_ses_event_destination" "kinesis" { name = "event-destination-kinesis" diff --git a/website/source/docs/providers/aws/r/ses_receipt_filter.html.markdown b/website/source/docs/providers/aws/r/ses_receipt_filter.html.markdown index eca8cbc57..d45f710dc 100644 --- a/website/source/docs/providers/aws/r/ses_receipt_filter.html.markdown +++ b/website/source/docs/providers/aws/r/ses_receipt_filter.html.markdown @@ -12,7 +12,7 @@ Provides an SES receipt filter resource ## Example Usage -``` +```hcl resource "aws_ses_receipt_filter" "filter" { name = "block-spammer" cidr = "10.10.10.10" diff --git a/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown b/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown index ff6cbb83b..d08bf81ab 100644 --- a/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown +++ b/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown @@ -12,7 +12,7 @@ Provides an SES receipt rule resource ## Example Usage -``` +```hcl # Add a header to the email and store it in S3 resource "aws_ses_receipt_rule" "store" { name = "store" diff --git a/website/source/docs/providers/aws/r/ses_receipt_rule_set.html.markdown b/website/source/docs/providers/aws/r/ses_receipt_rule_set.html.markdown index 40da25d04..f8ec307ed 100644 --- 
a/website/source/docs/providers/aws/r/ses_receipt_rule_set.html.markdown +++ b/website/source/docs/providers/aws/r/ses_receipt_rule_set.html.markdown @@ -12,7 +12,7 @@ Provides an SES receipt rule set resource ## Example Usage -``` +```hcl resource "aws_ses_receipt_rule_set" "main" { rule_set_name = "primary-rules" } diff --git a/website/source/docs/providers/aws/r/sfn_activity.html.markdown b/website/source/docs/providers/aws/r/sfn_activity.html.markdown index a5a52d517..e6e9407ec 100644 --- a/website/source/docs/providers/aws/r/sfn_activity.html.markdown +++ b/website/source/docs/providers/aws/r/sfn_activity.html.markdown @@ -12,7 +12,7 @@ Provides a Step Function Activity resource ## Example Usage -``` +```hcl resource "aws_sfn_activity" "sfn_activity" { name = "my-activity" } diff --git a/website/source/docs/providers/aws/r/sfn_state_machine.html.markdown b/website/source/docs/providers/aws/r/sfn_state_machine.html.markdown index 71c31a31f..e9bd9efbd 100644 --- a/website/source/docs/providers/aws/r/sfn_state_machine.html.markdown +++ b/website/source/docs/providers/aws/r/sfn_state_machine.html.markdown @@ -12,7 +12,7 @@ Provides a Step Function State Machine resource ## Example Usage -``` +```hcl # ... resource "aws_sfn_state_machine" "sfn_state_machine" { diff --git a/website/source/docs/providers/aws/r/simpledb_domain.html.markdown b/website/source/docs/providers/aws/r/simpledb_domain.html.markdown index 2b031ec6e..2b78d32b7 100644 --- a/website/source/docs/providers/aws/r/simpledb_domain.html.markdown +++ b/website/source/docs/providers/aws/r/simpledb_domain.html.markdown @@ -12,7 +12,7 @@ Provides a SimpleDB domain resource ## Example Usage -``` +```hcl resource "aws_simpledb_domain" "users" { name = "users" } diff --git a/website/source/docs/providers/aws/r/snapshot_create_volume_permission.html.markdown b/website/source/docs/providers/aws/r/snapshot_create_volume_permission.html.markdown index 6d571725a..247184039 100644 --- a/website/source/docs/providers/aws/r/snapshot_create_volume_permission.html.markdown +++ b/website/source/docs/providers/aws/r/snapshot_create_volume_permission.html.markdown @@ -12,7 +12,7 @@ Adds permission to create volumes off of a given EBS Snapshot. 
## Example Usage -``` +```hcl resource "aws_snapshot_create_volume_permission" "example_perm" { snapshot_id = "${aws_ebs_snapshot.example_snapshot.id}" account_id = "12345678" diff --git a/website/source/docs/providers/aws/r/sns_topic.html.markdown b/website/source/docs/providers/aws/r/sns_topic.html.markdown index 13ce6bc9b..c4f1218ef 100644 --- a/website/source/docs/providers/aws/r/sns_topic.html.markdown +++ b/website/source/docs/providers/aws/r/sns_topic.html.markdown @@ -12,7 +12,7 @@ Provides an SNS topic resource ## Example Usage -``` +```hcl resource "aws_sns_topic" "user_updates" { name = "user-updates-topic" } diff --git a/website/source/docs/providers/aws/r/sns_topic_policy.html.markdown b/website/source/docs/providers/aws/r/sns_topic_policy.html.markdown index a21914384..e95312b2e 100644 --- a/website/source/docs/providers/aws/r/sns_topic_policy.html.markdown +++ b/website/source/docs/providers/aws/r/sns_topic_policy.html.markdown @@ -12,7 +12,7 @@ Provides an SNS topic policy resource ## Example Usage -``` +```hcl resource "aws_sns_topic" "test" { name = "my-topic-with-policy" } diff --git a/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown b/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown index b01084186..cba8d5015 100644 --- a/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown +++ b/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown @@ -25,7 +25,7 @@ probably be SQS queues. You can directly supply a topic and ARN by hand in the `topic_arn` property along with the queue ARN: -``` +```hcl resource "aws_sns_topic_subscription" "user_updates_sqs_target" { topic_arn = "arn:aws:sns:us-west-2:432981146916:user-updates-topic" protocol = "sqs" @@ -35,7 +35,7 @@ resource "aws_sns_topic_subscription" "user_updates_sqs_target" { Alternatively you can use the ARN properties of a managed SNS topic and SQS queue: -``` +```hcl resource "aws_sns_topic" "user_updates" { name = "user-updates-topic" } @@ -50,9 +50,10 @@ resource "aws_sns_topic_subscription" "user_updates_sqs_target" { endpoint = "${aws_sqs_queue.user_updates_queue.arn}" } ``` + You can subscribe SNS topics to SQS queues in different Amazon accounts and regions: -``` +```hcl /* # # Variables @@ -272,7 +273,6 @@ Endpoints have different format requirements according to the protocol that is c * SQS endpoints come in the form of the SQS queue's ARN (not the URL of the queue) e.g: `arn:aws:sqs:us-west-2:432981146916:terraform-queue-too` * Application endpoints are also the endpoint ARN for the mobile app and device. - ## Attributes Reference The following attributes are exported: @@ -283,7 +283,6 @@ The following attributes are exported: * `endpoint` - The full endpoint to send data to (SQS ARN, HTTP(S) URL, Application ARN, SMS number, etc.) * `arn` - The ARN of the subscription stored as a more user-friendly property - ## Import SNS Topic Subscriptions can be imported using the `subscription arn`, e.g. 
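For instance (the resource address and ARN below are illustrative placeholders, not values from a real account):

```
$ terraform import aws_sns_topic_subscription.user_updates_sqs_target arn:aws:sns:us-west-2:123456789012:user-updates-topic:00000000-0000-0000-0000-000000000000
```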
diff --git a/website/source/docs/providers/aws/r/spot_datafeed_subscription.html.markdown b/website/source/docs/providers/aws/r/spot_datafeed_subscription.html.markdown index ea1def561..e1cac15d9 100644 --- a/website/source/docs/providers/aws/r/spot_datafeed_subscription.html.markdown +++ b/website/source/docs/providers/aws/r/spot_datafeed_subscription.html.markdown @@ -15,7 +15,7 @@ This data feed is sent to an Amazon S3 bucket that you specify when you subscrib ## Example Usage -``` +```hcl resource "aws_s3_bucket" "default" { bucket = "tf-spot-datafeed" } @@ -30,7 +30,6 @@ resource "aws_spot_datafeed_subscription" "default" { * `bucket` - (Required) The Amazon S3 bucket in which to store the Spot instance data feed. * `prefix` - (Optional) Path of folder inside bucket to place spot pricing data. - ## Import A Spot Datafeed Subscription can be imported using the word `spot-datafeed-subscription`, e.g. diff --git a/website/source/docs/providers/aws/r/spot_fleet_request.html.markdown b/website/source/docs/providers/aws/r/spot_fleet_request.html.markdown index b4ac14a99..b96e5c847 100644 --- a/website/source/docs/providers/aws/r/spot_fleet_request.html.markdown +++ b/website/source/docs/providers/aws/r/spot_fleet_request.html.markdown @@ -13,7 +13,7 @@ instances to be requested on the Spot market. ## Example Usage -``` +```hcl # Request a Spot fleet resource "aws_spot_fleet_request" "cheap_compute" { iam_fleet_role = "arn:aws:iam::12345678:role/spot-fleet" @@ -48,7 +48,7 @@ resource "aws_spot_fleet_request" "cheap_compute" { ~> **NOTE:** Terraform does not support the functionality where multiple `subnet_id` or `availability_zone` parameters can be specified in the same launch configuration block. If you want to specify multiple values, then separate launch configuration blocks should be used: -``` +```hcl resource "aws_spot_fleet_request" "foo" { iam_fleet_role = "arn:aws:iam::12345678:role/spot-fleet" spot_price = "0.005" @@ -109,7 +109,6 @@ lowestPrice. (for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request. Defaults to 24 hours. - ## Attributes Reference The following attributes are exported: diff --git a/website/source/docs/providers/aws/r/spot_instance_request.html.markdown b/website/source/docs/providers/aws/r/spot_instance_request.html.markdown index 36020b61e..89e4fb824 100644 --- a/website/source/docs/providers/aws/r/spot_instance_request.html.markdown +++ b/website/source/docs/providers/aws/r/spot_instance_request.html.markdown @@ -30,7 +30,7 @@ for more information. 
## Example Usage -``` +```hcl # Request a spot instance at $0.03 resource "aws_spot_instance_request" "cheap_worker" { ami = "ami-1234" diff --git a/website/source/docs/providers/aws/r/sqs_queue.html.markdown b/website/source/docs/providers/aws/r/sqs_queue.html.markdown index 38e42ef81..fbe384927 100644 --- a/website/source/docs/providers/aws/r/sqs_queue.html.markdown +++ b/website/source/docs/providers/aws/r/sqs_queue.html.markdown @@ -10,7 +10,7 @@ description: |- ## Example Usage -``` +```hcl resource "aws_sqs_queue" "terraform_queue" { name = "terraform-example-queue" delay_seconds = 90 @@ -23,7 +23,7 @@ resource "aws_sqs_queue" "terraform_queue" { ## FIFO queue -``` +```hcl resource "aws_sqs_queue" "terraform_queue" { name = "terraform-example-queue.fifo" fifo_queue = true diff --git a/website/source/docs/providers/aws/r/sqs_queue_policy.html.markdown b/website/source/docs/providers/aws/r/sqs_queue_policy.html.markdown index 4d96e00e0..ecb0ccb28 100644 --- a/website/source/docs/providers/aws/r/sqs_queue_policy.html.markdown +++ b/website/source/docs/providers/aws/r/sqs_queue_policy.html.markdown @@ -13,7 +13,7 @@ while referencing ARN of the queue within the policy. ## Example Usage -``` +```hcl resource "aws_sqs_queue" "q" { name = "examplequeue" } diff --git a/website/source/docs/providers/aws/r/ssm_activation.html.markdown b/website/source/docs/providers/aws/r/ssm_activation.html.markdown index 8dea12cce..8cba6236e 100644 --- a/website/source/docs/providers/aws/r/ssm_activation.html.markdown +++ b/website/source/docs/providers/aws/r/ssm_activation.html.markdown @@ -12,7 +12,7 @@ Registers an on-premises server or virtual machine with Amazon EC2 so that it ca ## Example Usage -``` +```hcl resource "aws_iam_role" "test_role" { name = "test_role" diff --git a/website/source/docs/providers/aws/r/ssm_association.html.markdown b/website/source/docs/providers/aws/r/ssm_association.html.markdown index ea57ecb9d..ae29a5325 100644 --- a/website/source/docs/providers/aws/r/ssm_association.html.markdown +++ b/website/source/docs/providers/aws/r/ssm_association.html.markdown @@ -12,7 +12,7 @@ Assosciates an SSM Document to an instance. ## Example Usage -``` +```hcl resource "aws_security_group" "tf_test_foo" { name = "tf_test_foo" description = "foo" diff --git a/website/source/docs/providers/aws/r/ssm_document.html.markdown b/website/source/docs/providers/aws/r/ssm_document.html.markdown index 369aff886..a8aace8d3 100644 --- a/website/source/docs/providers/aws/r/ssm_document.html.markdown +++ b/website/source/docs/providers/aws/r/ssm_document.html.markdown @@ -16,7 +16,7 @@ schema version you must recreate the resource. ## Example Usage -``` +```hcl resource "aws_ssm_document" "foo" { name = "test_document" document_type = "Command" diff --git a/website/source/docs/providers/aws/r/subnet.html.markdown b/website/source/docs/providers/aws/r/subnet.html.markdown index f7457d8ee..8fb8aebff 100644 --- a/website/source/docs/providers/aws/r/subnet.html.markdown +++ b/website/source/docs/providers/aws/r/subnet.html.markdown @@ -12,7 +12,7 @@ Provides an VPC subnet resource. ## Example Usage -``` +```hcl resource "aws_subnet" "main" { vpc_id = "${aws_vpc.main.id}" cidr_block = "10.0.1.0/24" @@ -49,8 +49,6 @@ The following attributes are exported: * `cidr_block` - The CIDR block for the subnet. * `vpc_id` - The VPC ID. - - ## Import Subnets can be imported using the `subnet id`, e.g. 
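For example, with `public_subnet` as the configured resource name and a stand-in subnet ID:

```
$ terraform import aws_subnet.public_subnet subnet-9d4a7b6c
```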
diff --git a/website/source/docs/providers/aws/r/volume_attachment.html.markdown b/website/source/docs/providers/aws/r/volume_attachment.html.markdown index aad0935f5..75cc933c9 100644 --- a/website/source/docs/providers/aws/r/volume_attachment.html.markdown +++ b/website/source/docs/providers/aws/r/volume_attachment.html.markdown @@ -15,7 +15,7 @@ detach volumes from AWS Instances. ## Example Usage -``` +```hcl resource "aws_volume_attachment" "ebs_att" { device_name = "/dev/sdh" volume_id = "${aws_ebs_volume.example.id}" diff --git a/website/source/docs/providers/aws/r/vpc.html.markdown b/website/source/docs/providers/aws/r/vpc.html.markdown index a05b57d35..115da9d23 100644 --- a/website/source/docs/providers/aws/r/vpc.html.markdown +++ b/website/source/docs/providers/aws/r/vpc.html.markdown @@ -14,7 +14,7 @@ Provides an VPC resource. Basic usage: -``` +```hcl resource "aws_vpc" "main" { cidr_block = "10.0.0.0/16" } @@ -22,7 +22,7 @@ resource "aws_vpc" "main" { Basic usage with tags: -``` +```hcl resource "aws_vpc" "main" { cidr_block = "10.0.0.0/16" instance_tenancy = "dedicated" diff --git a/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown b/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown index 7f5914a63..34bf01cc8 100644 --- a/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown @@ -14,7 +14,7 @@ Provides a VPC DHCP Options resource. Basic usage: -``` +```hcl resource "aws_vpc_dhcp_options" "dns_resolver" { domain_name_servers = ["8.8.8.8", "8.8.4.4"] } @@ -22,7 +22,7 @@ resource "aws_vpc_dhcp_options" "dns_resolver" { Full usage: -``` +```hcl resource "aws_vpc_dhcp_options" "foo" { domain_name = "service.consul" domain_name_servers = ["127.0.0.1", "10.0.0.2"] diff --git a/website/source/docs/providers/aws/r/vpc_dhcp_options_association.html.markdown b/website/source/docs/providers/aws/r/vpc_dhcp_options_association.html.markdown index e2839dfdc..2c55ecf10 100644 --- a/website/source/docs/providers/aws/r/vpc_dhcp_options_association.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_dhcp_options_association.html.markdown @@ -12,7 +12,7 @@ Provides a VPC DHCP Options Association resource. ## Example Usage -``` +```hcl resource "aws_vpc_dhcp_options_association" "dns_resolver" { vpc_id = "${aws_vpc.foo.id}" dhcp_options_id = "${aws_vpc_dhcp_options.foo.id}" diff --git a/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown b/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown index a35cdedc8..4195ecc45 100644 --- a/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown @@ -21,7 +21,7 @@ and will overwrite the association. Basic usage: -``` +```hcl resource "aws_vpc_endpoint" "private-s3" { vpc_id = "${aws_vpc.main.id}" service_name = "com.amazonaws.us-west-2.s3" @@ -45,7 +45,6 @@ The following attributes are exported: * `prefix_list_id` - The prefix list ID of the exposed service. * `cidr_blocks` - The list of CIDR blocks for the exposed service. - ## Import VPC Endpoints can be imported using the `vpc endpoint id`, e.g. 
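For example, with `endpoint1` as the configured resource name and a stand-in endpoint ID:

```
$ terraform import aws_vpc_endpoint.endpoint1 vpce-3ecf2a57
```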
diff --git a/website/source/docs/providers/aws/r/vpc_endpoint_route_table_association.html.markdown b/website/source/docs/providers/aws/r/vpc_endpoint_route_table_association.html.markdown index 5f518fbee..61975fe62 100644 --- a/website/source/docs/providers/aws/r/vpc_endpoint_route_table_association.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_endpoint_route_table_association.html.markdown @@ -20,7 +20,7 @@ Table Association resource. Doing so will cause a conflict of associations and w Basic usage: -``` +```hcl resource "aws_vpc_endpoint_route_table_association" "private_s3" { vpc_endpoint_id = "${aws_vpc_endpoint.s3.id}" route_table_id = "${aws_route_table.private.id}" diff --git a/website/source/docs/providers/aws/r/vpc_peering.html.markdown b/website/source/docs/providers/aws/r/vpc_peering.html.markdown index 3f8539192..cc7de2630 100644 --- a/website/source/docs/providers/aws/r/vpc_peering.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_peering.html.markdown @@ -16,7 +16,7 @@ use the `aws_vpc_peering_connection_accepter` resource to manage the accepter's ## Example Usage -``` +```hcl resource "aws_vpc_peering_connection" "foo" { peer_owner_id = "${var.peer_owner_id}" peer_vpc_id = "${aws_vpc.bar.id}" @@ -26,7 +26,7 @@ resource "aws_vpc_peering_connection" "foo" { Basic usage with connection options: -``` +```hcl resource "aws_vpc_peering_connection" "foo" { peer_owner_id = "${var.peer_owner_id}" peer_vpc_id = "${aws_vpc.bar.id}" @@ -44,7 +44,7 @@ resource "aws_vpc_peering_connection" "foo" { Basic usage with tags: -``` +```hcl resource "aws_vpc_peering_connection" "foo" { peer_owner_id = "${var.peer_owner_id}" peer_vpc_id = "${aws_vpc.bar.id}" diff --git a/website/source/docs/providers/aws/r/vpc_peering_accepter.html.markdown b/website/source/docs/providers/aws/r/vpc_peering_accepter.html.markdown index bfa780fe4..c330226b3 100644 --- a/website/source/docs/providers/aws/r/vpc_peering_accepter.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_peering_accepter.html.markdown @@ -18,7 +18,7 @@ connection into management. ## Example Usage -``` +```hcl provider "aws" { // Requester's credentials. } diff --git a/website/source/docs/providers/aws/r/vpn_connection.html.markdown b/website/source/docs/providers/aws/r/vpn_connection.html.markdown index 25ebe2f0e..d2776b24c 100644 --- a/website/source/docs/providers/aws/r/vpn_connection.html.markdown +++ b/website/source/docs/providers/aws/r/vpn_connection.html.markdown @@ -13,7 +13,7 @@ Provides a VPN connection connected to a VPC. These objects can be connected to ## Example Usage -``` +```hcl resource "aws_vpc" "vpc" { cidr_block = "10.0.0.0/16" } diff --git a/website/source/docs/providers/aws/r/vpn_connection_route.html.markdown b/website/source/docs/providers/aws/r/vpn_connection_route.html.markdown index 7dd3fb68c..9d64e9eb0 100644 --- a/website/source/docs/providers/aws/r/vpn_connection_route.html.markdown +++ b/website/source/docs/providers/aws/r/vpn_connection_route.html.markdown @@ -12,7 +12,7 @@ Provides a static route between a VPN connection and a customer gateway. 
 
 ## Example Usage
 
-```
+```hcl
 resource "aws_vpc" "vpc" {
   cidr_block = "10.0.0.0/16"
 }
diff --git a/website/source/docs/providers/aws/r/vpn_gateway.html.markdown b/website/source/docs/providers/aws/r/vpn_gateway.html.markdown
index b805c3527..1ec90e744 100644
--- a/website/source/docs/providers/aws/r/vpn_gateway.html.markdown
+++ b/website/source/docs/providers/aws/r/vpn_gateway.html.markdown
@@ -12,7 +12,7 @@ Provides a resource to create a VPC VPN Gateway.
 
 ## Example Usage
 
-```
+```hcl
 resource "aws_vpn_gateway" "vpn_gw" {
   vpc_id = "${aws_vpc.main.id}"
 
diff --git a/website/source/docs/providers/aws/r/vpn_gateway_attachment.html.markdown b/website/source/docs/providers/aws/r/vpn_gateway_attachment.html.markdown
index 2ae597dfa..6271598ff 100644
--- a/website/source/docs/providers/aws/r/vpn_gateway_attachment.html.markdown
+++ b/website/source/docs/providers/aws/r/vpn_gateway_attachment.html.markdown
@@ -17,7 +17,7 @@ to an existing VPC by setting the [`vpc_id`](vpn_gateway.html#vpc_id) attribute
 
 ## Example Usage
 
-```
+```hcl
 resource "aws_vpc" "network" {
   cidr_block = "10.0.0.0/16"
 }
diff --git a/website/source/docs/providers/aws/r/waf_byte_match_set.html.markdown b/website/source/docs/providers/aws/r/waf_byte_match_set.html.markdown
index 090f72c6c..0ccf628be 100644
--- a/website/source/docs/providers/aws/r/waf_byte_match_set.html.markdown
+++ b/website/source/docs/providers/aws/r/waf_byte_match_set.html.markdown
@@ -12,7 +12,7 @@ Provides a WAF Byte Match Set Resource
 
 ## Example Usage
 
-```
+```hcl
 resource "aws_waf_byte_match_set" "byte_set" {
   name = "tf_waf_byte_match_set"
 
diff --git a/website/source/docs/providers/aws/r/waf_ipset.html.markdown b/website/source/docs/providers/aws/r/waf_ipset.html.markdown
index 0fc43b395..042d224e9 100644
--- a/website/source/docs/providers/aws/r/waf_ipset.html.markdown
+++ b/website/source/docs/providers/aws/r/waf_ipset.html.markdown
@@ -12,7 +12,7 @@ Provides a WAF IPSet Resource
 
 ## Example Usage
 
-```
+```hcl
 resource "aws_waf_ipset" "ipset" {
   name = "tfIPSet"
 
diff --git a/website/source/docs/providers/aws/r/waf_rule.html.markdown b/website/source/docs/providers/aws/r/waf_rule.html.markdown
index 7df474cf9..0db2fa07c 100644
--- a/website/source/docs/providers/aws/r/waf_rule.html.markdown
+++ b/website/source/docs/providers/aws/r/waf_rule.html.markdown
@@ -12,7 +12,7 @@ Provides a WAF Rule Resource
 
 ## Example Usage
 
-```
+```hcl
 resource "aws_waf_ipset" "ipset" {
   name = "tfIPSet"
 
diff --git a/website/source/docs/providers/aws/r/waf_size_constraint_set.html.markdown b/website/source/docs/providers/aws/r/waf_size_constraint_set.html.markdown
index 3e8c353fd..5b8635e2b 100644
--- a/website/source/docs/providers/aws/r/waf_size_constraint_set.html.markdown
+++ b/website/source/docs/providers/aws/r/waf_size_constraint_set.html.markdown
@@ -12,7 +12,7 @@ Provides a WAF Size Constraint Set Resource
 
 ## Example Usage
 
-```
+```hcl
 resource "aws_waf_size_constraint_set" "size_constraint_set" {
   name = "tfsize_constraints"
 
diff --git a/website/source/docs/providers/aws/r/waf_sql_injection_match_set.html.markdown b/website/source/docs/providers/aws/r/waf_sql_injection_match_set.html.markdown
index 632528d15..9d8fab08e 100644
--- a/website/source/docs/providers/aws/r/waf_sql_injection_match_set.html.markdown
+++ b/website/source/docs/providers/aws/r/waf_sql_injection_match_set.html.markdown
@@ -12,7 +12,7 @@ Provides a WAF SQL Injection Match Set Resource
 
 ## Example Usage
 
-```
+```hcl
 resource "aws_waf_sql_injection_match_set" "sql_injection_match_set" {
   name = "tf-sql_injection_match_set"
 
diff --git a/website/source/docs/providers/aws/r/waf_web_acl.html.markdown b/website/source/docs/providers/aws/r/waf_web_acl.html.markdown
index 0f0d5777f..801189657 100644
--- a/website/source/docs/providers/aws/r/waf_web_acl.html.markdown
+++ b/website/source/docs/providers/aws/r/waf_web_acl.html.markdown
@@ -12,7 +12,7 @@ Provides a WAF Web ACL Resource
 
 ## Example Usage
 
-```
+```hcl
 resource "aws_waf_ipset" "ipset" {
   name = "tfIPSet"
 
diff --git a/website/source/docs/providers/aws/r/waf_xss_match_set.html.markdown b/website/source/docs/providers/aws/r/waf_xss_match_set.html.markdown
index 8a0aff0e8..29fd7ab99 100644
--- a/website/source/docs/providers/aws/r/waf_xss_match_set.html.markdown
+++ b/website/source/docs/providers/aws/r/waf_xss_match_set.html.markdown
@@ -12,7 +12,7 @@ Provides a WAF XSS Match Set Resource
 
 ## Example Usage
 
-```
+```hcl
 resource "aws_waf_xss_match_set" "xss_match_set" {
   name = "xss_match_set"
 
diff --git a/website/source/docs/providers/azure/index.html.markdown b/website/source/docs/providers/azure/index.html.markdown
index 6abb5f5a2..a4ae0c66d 100644
--- a/website/source/docs/providers/azure/index.html.markdown
+++ b/website/source/docs/providers/azure/index.html.markdown
@@ -19,7 +19,7 @@ Use the navigation to the left to read about the available resources.
 
 ## Example Usage
 
-```
+```hcl
 # Configure the Azure Provider
 provider "azure" {
   publish_settings = "${file("credentials.publishsettings")}"
diff --git a/website/source/docs/providers/azure/r/affinity_group.html.markdown b/website/source/docs/providers/azure/r/affinity_group.html.markdown
index db5ff1461..f09a60a53 100644
--- a/website/source/docs/providers/azure/r/affinity_group.html.markdown
+++ b/website/source/docs/providers/azure/r/affinity_group.html.markdown
@@ -12,7 +12,7 @@ Creates a new affinity group on Azure.
 
 ## Example Usage
 
-```
+```hcl
 resource "azure_affinity_group" "terraform-main-group" {
   name = "terraform-group"
   location = "North Europe"
diff --git a/website/source/docs/providers/azure/r/data_disk.html.markdown b/website/source/docs/providers/azure/r/data_disk.html.markdown
index dca4ca770..484fd6ece 100644
--- a/website/source/docs/providers/azure/r/data_disk.html.markdown
+++ b/website/source/docs/providers/azure/r/data_disk.html.markdown
@@ -13,7 +13,7 @@ it will attach that disk. Otherwise it will create and attach a new empty disk.
 
 ## Example Usage
 
-```
+```hcl
 resource "azure_data_disk" "data" {
   lun = 0
   size = 10
diff --git a/website/source/docs/providers/azure/r/dns_server.html.markdown b/website/source/docs/providers/azure/r/dns_server.html.markdown
index 6394146f1..311c2f59e 100644
--- a/website/source/docs/providers/azure/r/dns_server.html.markdown
+++ b/website/source/docs/providers/azure/r/dns_server.html.markdown
@@ -12,7 +12,7 @@ Creates a new DNS server definition to be used internally in Azure.
 
 ## Example Usage
 
-```
+```hcl
 resource "azure_dns_server" "google-dns" {
   name = "google"
   dns_address = "8.8.8.8"
diff --git a/website/source/docs/providers/azure/r/hosted_service.html.markdown b/website/source/docs/providers/azure/r/hosted_service.html.markdown
index 9319ebaff..f16783dcc 100644
--- a/website/source/docs/providers/azure/r/hosted_service.html.markdown
+++ b/website/source/docs/providers/azure/r/hosted_service.html.markdown
@@ -12,7 +12,7 @@ Creates a new hosted service on Azure with its own .cloudapp.net domain.
## Example Usage -``` +```hcl resource "azure_hosted_service" "terraform-service" { name = "terraform-service" location = "North Europe" diff --git a/website/source/docs/providers/azure/r/instance.html.markdown b/website/source/docs/providers/azure/r/instance.html.markdown index 7da3beac6..75e8c20e9 100644 --- a/website/source/docs/providers/azure/r/instance.html.markdown +++ b/website/source/docs/providers/azure/r/instance.html.markdown @@ -13,7 +13,7 @@ machine in the deployment based on the specified configuration. ## Example Usage -``` +```hcl resource "azure_hosted_service" "terraform-service" { name = "terraform-service" location = "North Europe" diff --git a/website/source/docs/providers/azure/r/local_network_connection.html.markdown b/website/source/docs/providers/azure/r/local_network_connection.html.markdown index 85e7d5609..d9b3d3ef6 100644 --- a/website/source/docs/providers/azure/r/local_network_connection.html.markdown +++ b/website/source/docs/providers/azure/r/local_network_connection.html.markdown @@ -12,7 +12,7 @@ Defines a new connection to a remote network through a VPN tunnel. ## Example Usage -``` +```hcl resource "azure_local_network_connection" "localnet" { name = "terraform-local-network-connection" vpn_gateway_address = "45.12.189.2" diff --git a/website/source/docs/providers/azure/r/security_group.html.markdown b/website/source/docs/providers/azure/r/security_group.html.markdown index 8024c9273..31a4ea278 100644 --- a/website/source/docs/providers/azure/r/security_group.html.markdown +++ b/website/source/docs/providers/azure/r/security_group.html.markdown @@ -13,7 +13,7 @@ subscription. ## Example Usage -``` +```hcl resource "azure_security_group" "web" { name = "webservers" location = "West US" diff --git a/website/source/docs/providers/azure/r/security_group_rule.html.markdown b/website/source/docs/providers/azure/r/security_group_rule.html.markdown index c6981fd99..3b01cb092 100644 --- a/website/source/docs/providers/azure/r/security_group_rule.html.markdown +++ b/website/source/docs/providers/azure/r/security_group_rule.html.markdown @@ -24,7 +24,7 @@ updating the state with regards to them. ## Example Usage -``` +```hcl resource "azure_security_group" "web" { # ... } diff --git a/website/source/docs/providers/azure/r/sql_database_server.html.markdown b/website/source/docs/providers/azure/r/sql_database_server.html.markdown index 04afdaee5..baeb32ba4 100644 --- a/website/source/docs/providers/azure/r/sql_database_server.html.markdown +++ b/website/source/docs/providers/azure/r/sql_database_server.html.markdown @@ -12,7 +12,7 @@ Allocates a new SQL Database Server on Azure. ## Example Usage -``` +```hcl resource "azure_sql_database_server" "sql-serv" { name = "" location = "West US" diff --git a/website/source/docs/providers/azure/r/sql_database_server_firewall_rule.html.markdown b/website/source/docs/providers/azure/r/sql_database_server_firewall_rule.html.markdown index 490689c30..b5a02b3b2 100644 --- a/website/source/docs/providers/azure/r/sql_database_server_firewall_rule.html.markdown +++ b/website/source/docs/providers/azure/r/sql_database_server_firewall_rule.html.markdown @@ -12,7 +12,7 @@ Defines a new Firewall Rule to be applied across the given Database Servers. ## Example Usage -``` +```hcl resource "azure_sql_database_server" "sql-serv1" { # ... 
} diff --git a/website/source/docs/providers/azure/r/sql_database_service.html.markdown b/website/source/docs/providers/azure/r/sql_database_service.html.markdown index c074f7a80..f8cdf9687 100644 --- a/website/source/docs/providers/azure/r/sql_database_service.html.markdown +++ b/website/source/docs/providers/azure/r/sql_database_service.html.markdown @@ -12,7 +12,7 @@ Creates a new SQL database service on an Azure database server. ## Example Usage -``` +```hcl resource "azure_sql_database_service" "sql-server" { name = "terraform-testing-db-renamed" database_server_name = "flibberflabber" diff --git a/website/source/docs/providers/azure/r/storage_blob.html.markdown b/website/source/docs/providers/azure/r/storage_blob.html.markdown index 7778fc762..6f1d56aaf 100644 --- a/website/source/docs/providers/azure/r/storage_blob.html.markdown +++ b/website/source/docs/providers/azure/r/storage_blob.html.markdown @@ -12,7 +12,7 @@ Creates a new storage blob within a given storage container on Azure. ## Example Usage -``` +```hcl resource "azure_storage_blob" "foo" { name = "tftesting-blob" storage_service_name = "tfstorserv" @@ -20,7 +20,7 @@ resource "azure_storage_blob" "foo" { type = "PageBlob" size = 1024 } -```` +``` ## Argument Reference diff --git a/website/source/docs/providers/azure/r/storage_container.html.markdown b/website/source/docs/providers/azure/r/storage_container.html.markdown index c286b6a0e..3b09fd322 100644 --- a/website/source/docs/providers/azure/r/storage_container.html.markdown +++ b/website/source/docs/providers/azure/r/storage_container.html.markdown @@ -12,13 +12,13 @@ Creates a new storage container within a given storage service on Azure. ## Example Usage -``` +```hcl resource "azure_storage_container" "stor-cont" { name = "terraform-storage-container" container_access_type = "blob" storage_service_name = "tfstorserv" } -```` +``` ## Argument Reference diff --git a/website/source/docs/providers/azure/r/storage_queue.html.markdown b/website/source/docs/providers/azure/r/storage_queue.html.markdown index 2f8b4a65a..117d766a9 100644 --- a/website/source/docs/providers/azure/r/storage_queue.html.markdown +++ b/website/source/docs/providers/azure/r/storage_queue.html.markdown @@ -12,12 +12,12 @@ Creates a new storage queue within a given storage service on Azure. ## Example Usage -``` +```hcl resource "azure_storage_queue" "stor-queue" { name = "terraform-storage-queue" storage_service_name = "tfstorserv" } -```` +``` ## Argument Reference diff --git a/website/source/docs/providers/azure/r/storage_service.html.markdown b/website/source/docs/providers/azure/r/storage_service.html.markdown index 006805f46..619f45755 100644 --- a/website/source/docs/providers/azure/r/storage_service.html.markdown +++ b/website/source/docs/providers/azure/r/storage_service.html.markdown @@ -12,14 +12,14 @@ Creates a new storage service on Azure in which storage containers may be create ## Example Usage -``` +```hcl resource "azure_storage_service" "tfstor" { name = "tfstor" location = "West US" description = "Made by Terraform." 
account_type = "Standard_LRS" } -```` +``` ## Argument Reference diff --git a/website/source/docs/providers/azure/r/virtual_network.html.markdown b/website/source/docs/providers/azure/r/virtual_network.html.markdown index 0711ab9b1..092e013de 100644 --- a/website/source/docs/providers/azure/r/virtual_network.html.markdown +++ b/website/source/docs/providers/azure/r/virtual_network.html.markdown @@ -13,7 +13,7 @@ optionally be configured with a security group to be associated with the subnet. ## Example Usage -``` +```hcl resource "azure_virtual_network" "default" { name = "test-network" address_space = ["10.1.2.0/24"] diff --git a/website/source/docs/providers/azurerm/d/client_config.html.markdown b/website/source/docs/providers/azurerm/d/client_config.html.markdown index ef20b0d42..3bee99162 100644 --- a/website/source/docs/providers/azurerm/d/client_config.html.markdown +++ b/website/source/docs/providers/azurerm/d/client_config.html.markdown @@ -13,7 +13,7 @@ provider. ## Example Usage -``` +```hcl data "azurerm_client_config" "current" {} output "account_id" { diff --git a/website/source/docs/providers/azurerm/index.html.markdown b/website/source/docs/providers/azurerm/index.html.markdown index 1a79ea519..4fc423d8c 100644 --- a/website/source/docs/providers/azurerm/index.html.markdown +++ b/website/source/docs/providers/azurerm/index.html.markdown @@ -20,7 +20,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the Microsoft Azure Provider provider "azurerm" { subscription_id = "..." diff --git a/website/source/docs/providers/azurerm/r/availability_set.html.markdown b/website/source/docs/providers/azurerm/r/availability_set.html.markdown index 446444db7..8dd5cd412 100644 --- a/website/source/docs/providers/azurerm/r/availability_set.html.markdown +++ b/website/source/docs/providers/azurerm/r/availability_set.html.markdown @@ -12,7 +12,7 @@ Create an availability set for virtual machines. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "resourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/cdn_endpoint.html.markdown b/website/source/docs/providers/azurerm/r/cdn_endpoint.html.markdown index 43e63cf4c..0c64e24ec 100644 --- a/website/source/docs/providers/azurerm/r/cdn_endpoint.html.markdown +++ b/website/source/docs/providers/azurerm/r/cdn_endpoint.html.markdown @@ -12,7 +12,7 @@ A CDN Endpoint is the entity within a CDN Profile containing configuration infor ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/cdn_profile.html.markdown b/website/source/docs/providers/azurerm/r/cdn_profile.html.markdown index 875e795d3..cc2990589 100644 --- a/website/source/docs/providers/azurerm/r/cdn_profile.html.markdown +++ b/website/source/docs/providers/azurerm/r/cdn_profile.html.markdown @@ -12,7 +12,7 @@ Create a CDN Profile to create a collection of CDN Endpoints. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "resourceGroup1" location = "West US" @@ -57,6 +57,6 @@ The following attributes are exported: CDN Profiles can be imported using the `resource id`, e.g. 
-```
+```shell
 terraform import azurerm_cdn_profile.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Cdn/profiles/myprofile1
 ```
diff --git a/website/source/docs/providers/azurerm/r/container_registry.html.markdown b/website/source/docs/providers/azurerm/r/container_registry.html.markdown
index df79b7255..d72132f1e 100644
--- a/website/source/docs/providers/azurerm/r/container_registry.html.markdown
+++ b/website/source/docs/providers/azurerm/r/container_registry.html.markdown
@@ -15,7 +15,7 @@ Create as an Azure Container Registry instance.
 
 ## Example Usage
 
-```
+```hcl
 resource "azurerm_resource_group" "test" {
   name = "resourceGroup1"
   location = "West US"
diff --git a/website/source/docs/providers/azurerm/r/container_service.html.markdown b/website/source/docs/providers/azurerm/r/container_service.html.markdown
index 5319ce143..b06052f7d 100644
--- a/website/source/docs/providers/azurerm/r/container_service.html.markdown
+++ b/website/source/docs/providers/azurerm/r/container_service.html.markdown
@@ -14,7 +14,8 @@ Creates an Azure Container Service Instance
 [Read more about sensitive data in state](/docs/state/sensitive-data.html).
 
 ## Example Usage (DCOS)
-```
+
+```hcl
 resource "azurerm_resource_group" "test" {
   name = "acctestRG1"
   location = "West US"
@@ -59,7 +60,7 @@ resource "azurerm_container_service" "test" {
 
 ## Example Usage (Kubernetes)
 
-```
+```hcl
 resource "azurerm_resource_group" "test" {
   name = "acctestRG1"
   location = "West US"
@@ -108,7 +109,8 @@ resource "azurerm_container_service" "test" {
 ```
 
 ## Example Usage (Swarm)
-```
+
+```hcl
 resource "azurerm_resource_group" "test" {
   name = "acctestRG1"
   location = "West US"
diff --git a/website/source/docs/providers/azurerm/r/dns_a_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_a_record.html.markdown
index 317d16709..f1743607d 100644
--- a/website/source/docs/providers/azurerm/r/dns_a_record.html.markdown
+++ b/website/source/docs/providers/azurerm/r/dns_a_record.html.markdown
@@ -12,7 +12,7 @@ Enables you to manage DNS A Records within Azure DNS.
 
 ## Example Usage
 
-```
+```hcl
 resource "azurerm_resource_group" "test" {
   name = "acceptanceTestResourceGroup1"
   location = "West US"
@@ -31,6 +31,7 @@ resource "azurerm_dns_a_record" "test" {
   records = ["10.0.180.17"]
 }
 ```
+
 ## Argument Reference
 
 The following arguments are supported:
diff --git a/website/source/docs/providers/azurerm/r/dns_aaaa_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_aaaa_record.html.markdown
index 021cae113..773bc0ef7 100644
--- a/website/source/docs/providers/azurerm/r/dns_aaaa_record.html.markdown
+++ b/website/source/docs/providers/azurerm/r/dns_aaaa_record.html.markdown
@@ -12,7 +12,7 @@ Enables you to manage DNS AAAA Records within Azure DNS.
 
 ## Example Usage
 
-```
+```hcl
 resource "azurerm_resource_group" "test" {
   name = "acceptanceTestResourceGroup1"
   location = "West US"
@@ -31,6 +31,7 @@ resource "azurerm_dns_aaaa_record" "test" {
   records = ["2607:f8b0:4009:1803::1005"]
 }
 ```
+
 ## Argument Reference
 
 The following arguments are supported:
diff --git a/website/source/docs/providers/azurerm/r/dns_cname_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_cname_record.html.markdown
index 10832dff2..5e00c2e6b 100644
--- a/website/source/docs/providers/azurerm/r/dns_cname_record.html.markdown
+++ b/website/source/docs/providers/azurerm/r/dns_cname_record.html.markdown
@@ -12,7 +12,7 @@ Enables you to manage DNS CNAME Records within Azure DNS.
## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" @@ -31,6 +31,7 @@ resource "azurerm_dns_cname_record" "test" { record = "contoso.com" } ``` + ## Argument Reference The following arguments are supported: diff --git a/website/source/docs/providers/azurerm/r/dns_mx_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_mx_record.html.markdown index 20981ea98..197759215 100644 --- a/website/source/docs/providers/azurerm/r/dns_mx_record.html.markdown +++ b/website/source/docs/providers/azurerm/r/dns_mx_record.html.markdown @@ -12,7 +12,7 @@ Enables you to manage DNS MX Records within Azure DNS. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/dns_ns_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_ns_record.html.markdown index 72c630f62..99d6fd946 100644 --- a/website/source/docs/providers/azurerm/r/dns_ns_record.html.markdown +++ b/website/source/docs/providers/azurerm/r/dns_ns_record.html.markdown @@ -12,7 +12,7 @@ Enables you to manage DNS NS Records within Azure DNS. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/dns_srv_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_srv_record.html.markdown index 521686684..34b5719b2 100644 --- a/website/source/docs/providers/azurerm/r/dns_srv_record.html.markdown +++ b/website/source/docs/providers/azurerm/r/dns_srv_record.html.markdown @@ -12,7 +12,7 @@ Enables you to manage DNS SRV Records within Azure DNS. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/dns_txt_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_txt_record.html.markdown index 6177a5448..14299be53 100644 --- a/website/source/docs/providers/azurerm/r/dns_txt_record.html.markdown +++ b/website/source/docs/providers/azurerm/r/dns_txt_record.html.markdown @@ -12,7 +12,7 @@ Enables you to manage DNS TXT Records within Azure DNS. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/dns_zone.html.markdown b/website/source/docs/providers/azurerm/r/dns_zone.html.markdown index 71d555dca..7a896c8aa 100644 --- a/website/source/docs/providers/azurerm/r/dns_zone.html.markdown +++ b/website/source/docs/providers/azurerm/r/dns_zone.html.markdown @@ -12,7 +12,7 @@ Enables you to manage DNS zones within Azure DNS. These zones are hosted on Azur ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/eventhub.html.markdown b/website/source/docs/providers/azurerm/r/eventhub.html.markdown index 12b00ab06..8392fee44 100644 --- a/website/source/docs/providers/azurerm/r/eventhub.html.markdown +++ b/website/source/docs/providers/azurerm/r/eventhub.html.markdown @@ -12,7 +12,7 @@ Creates a new Event Hub as a nested resource within a Event Hub Namespace. 
## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "resourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/eventhub_authorization_rule.html.markdown b/website/source/docs/providers/azurerm/r/eventhub_authorization_rule.html.markdown index 7b025170c..08faea08d 100644 --- a/website/source/docs/providers/azurerm/r/eventhub_authorization_rule.html.markdown +++ b/website/source/docs/providers/azurerm/r/eventhub_authorization_rule.html.markdown @@ -12,7 +12,7 @@ Creates a new Event Hub Authorization Rule within an Event Hub. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "resourceGroup1" location = "West US" @@ -87,7 +87,6 @@ The following attributes are exported: * `secondary_connection_string` - The Secondary Connection String for the Event Hub Authorization Rule. - ## Import EventHubs can be imported using the `resource id`, e.g. diff --git a/website/source/docs/providers/azurerm/r/eventhub_consumer_group.html.markdown b/website/source/docs/providers/azurerm/r/eventhub_consumer_group.html.markdown index f10be3ba9..68758c263 100644 --- a/website/source/docs/providers/azurerm/r/eventhub_consumer_group.html.markdown +++ b/website/source/docs/providers/azurerm/r/eventhub_consumer_group.html.markdown @@ -12,7 +12,7 @@ Creates a new Event Hub Consumer Group as a nested resource within an Event Hub. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "resourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/eventhub_namespace.html.markdown b/website/source/docs/providers/azurerm/r/eventhub_namespace.html.markdown index 95a329da4..0d69bdba3 100644 --- a/website/source/docs/providers/azurerm/r/eventhub_namespace.html.markdown +++ b/website/source/docs/providers/azurerm/r/eventhub_namespace.html.markdown @@ -12,7 +12,7 @@ Create an EventHub Namespace. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "resourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/key_vault.html.markdown b/website/source/docs/providers/azurerm/r/key_vault.html.markdown index 3977ee6ba..1e869ab7f 100644 --- a/website/source/docs/providers/azurerm/r/key_vault.html.markdown +++ b/website/source/docs/providers/azurerm/r/key_vault.html.markdown @@ -12,7 +12,7 @@ Create a Key Vault. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "resourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/loadbalancer.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer.html.markdown index 2a868a109..e659bf2ec 100644 --- a/website/source/docs/providers/azurerm/r/loadbalancer.html.markdown +++ b/website/source/docs/providers/azurerm/r/loadbalancer.html.markdown @@ -12,7 +12,7 @@ Create a LoadBalancer Resource. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "LoadBalancerRG" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/loadbalancer_backend_address_pool.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer_backend_address_pool.html.markdown index f63b782d9..307fc29ad 100644 --- a/website/source/docs/providers/azurerm/r/loadbalancer_backend_address_pool.html.markdown +++ b/website/source/docs/providers/azurerm/r/loadbalancer_backend_address_pool.html.markdown @@ -14,7 +14,7 @@ Create a LoadBalancer Backend Address Pool. 
## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "LoadBalancerRG" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/loadbalancer_nat_pool.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer_nat_pool.html.markdown index e4740694b..6e34d5a76 100644 --- a/website/source/docs/providers/azurerm/r/loadbalancer_nat_pool.html.markdown +++ b/website/source/docs/providers/azurerm/r/loadbalancer_nat_pool.html.markdown @@ -14,7 +14,7 @@ Create a LoadBalancer NAT pool. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "LoadBalancerRG" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/loadbalancer_nat_rule.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer_nat_rule.html.markdown index 4eaaef724..49f87f58e 100644 --- a/website/source/docs/providers/azurerm/r/loadbalancer_nat_rule.html.markdown +++ b/website/source/docs/providers/azurerm/r/loadbalancer_nat_rule.html.markdown @@ -14,7 +14,7 @@ Create a LoadBalancer NAT Rule. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "LoadBalancerRG" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/loadbalancer_probe.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer_probe.html.markdown index 0add4f613..a2e0853e0 100644 --- a/website/source/docs/providers/azurerm/r/loadbalancer_probe.html.markdown +++ b/website/source/docs/providers/azurerm/r/loadbalancer_probe.html.markdown @@ -14,7 +14,7 @@ Create a LoadBalancer Probe Resource. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "LoadBalancerRG" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/loadbalancer_rule.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer_rule.html.markdown index ee7e2b9ae..cd8cf97b4 100644 --- a/website/source/docs/providers/azurerm/r/loadbalancer_rule.html.markdown +++ b/website/source/docs/providers/azurerm/r/loadbalancer_rule.html.markdown @@ -14,7 +14,7 @@ Create a LoadBalancer Rule. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "LoadBalancerRG" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown b/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown index b4681b6e4..b70e99741 100644 --- a/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown +++ b/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown @@ -12,7 +12,7 @@ Creates a new local network gateway connection over which specific connections c ## Example Usage -``` +```hcl resource "azurerm_local_network_gateway" "home" { name = "backHome" resource_group_name = "${azurerm_resource_group.test.name}" diff --git a/website/source/docs/providers/azurerm/r/managed_disk.html.markdown b/website/source/docs/providers/azurerm/r/managed_disk.html.markdown index e0058df77..1337d94c1 100644 --- a/website/source/docs/providers/azurerm/r/managed_disk.html.markdown +++ b/website/source/docs/providers/azurerm/r/managed_disk.html.markdown @@ -12,7 +12,7 @@ Create a managed disk. 
## Example Usage with Create Empty -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg" location = "West US 2" @@ -34,7 +34,7 @@ resource "azurerm_managed_disk" "test" { ## Example Usage with Create Copy -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg" location = "West US 2" diff --git a/website/source/docs/providers/azurerm/r/network_interface.html.markdown b/website/source/docs/providers/azurerm/r/network_interface.html.markdown index 92e197b06..82e925ac5 100644 --- a/website/source/docs/providers/azurerm/r/network_interface.html.markdown +++ b/website/source/docs/providers/azurerm/r/network_interface.html.markdown @@ -12,7 +12,7 @@ Network interface cards are virtual network cards that form the link between vir ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/network_security_group.html.markdown b/website/source/docs/providers/azurerm/r/network_security_group.html.markdown index 54c20b709..1e49cfc0c 100644 --- a/website/source/docs/providers/azurerm/r/network_security_group.html.markdown +++ b/website/source/docs/providers/azurerm/r/network_security_group.html.markdown @@ -12,7 +12,7 @@ Create a network security group that contains a list of network security rules. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/network_security_rule.html.markdown b/website/source/docs/providers/azurerm/r/network_security_rule.html.markdown index 5ceb39943..c30506706 100644 --- a/website/source/docs/providers/azurerm/r/network_security_rule.html.markdown +++ b/website/source/docs/providers/azurerm/r/network_security_rule.html.markdown @@ -12,7 +12,7 @@ Create a Network Security Rule. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/public_ip.html.markdown b/website/source/docs/providers/azurerm/r/public_ip.html.markdown index af5581686..e83df9321 100644 --- a/website/source/docs/providers/azurerm/r/public_ip.html.markdown +++ b/website/source/docs/providers/azurerm/r/public_ip.html.markdown @@ -12,7 +12,7 @@ Create a Public IP Address. 
## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "resourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/redis_cache.html.markdown b/website/source/docs/providers/azurerm/r/redis_cache.html.markdown index 67f919931..f57686514 100644 --- a/website/source/docs/providers/azurerm/r/redis_cache.html.markdown +++ b/website/source/docs/providers/azurerm/r/redis_cache.html.markdown @@ -12,7 +12,7 @@ Creates a new Redis Cache Resource ## Example Usage (Basic) -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" @@ -35,7 +35,7 @@ resource "azurerm_redis_cache" "test" { ## Example Usage (Standard) -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" @@ -57,7 +57,8 @@ resource "azurerm_redis_cache" "test" { ``` ## Example Usage (Premium with Clustering) -``` + +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" @@ -105,7 +106,8 @@ The following arguments are supported: * `shard_count` - (Optional) *Only available when using the Premium SKU* The number of Shards to create on the Redis Cluster. * `redis_configuration` - (Required) Potential Redis configuration values - with some limitations by SKU - defaults/details are shown below. -``` + +```hcl redis_configuration { maxclients = "512" maxmemory_reserve" = "10" diff --git a/website/source/docs/providers/azurerm/r/resource_group.html.markdown b/website/source/docs/providers/azurerm/r/resource_group.html.markdown index 60e7ed282..dcb8fbb0c 100644 --- a/website/source/docs/providers/azurerm/r/resource_group.html.markdown +++ b/website/source/docs/providers/azurerm/r/resource_group.html.markdown @@ -12,7 +12,7 @@ Creates a new resource group on Azure. 
## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "testResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/route.html.markdown b/website/source/docs/providers/azurerm/r/route.html.markdown index c43b20b44..2602150d7 100644 --- a/website/source/docs/providers/azurerm/r/route.html.markdown +++ b/website/source/docs/providers/azurerm/r/route.html.markdown @@ -12,7 +12,7 @@ Creates a new Route Resource ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/route_table.html.markdown b/website/source/docs/providers/azurerm/r/route_table.html.markdown index 76a498857..5b2432662 100644 --- a/website/source/docs/providers/azurerm/r/route_table.html.markdown +++ b/website/source/docs/providers/azurerm/r/route_table.html.markdown @@ -12,7 +12,7 @@ Creates a new Route Table Resource ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/search_service.html.markdown b/website/source/docs/providers/azurerm/r/search_service.html.markdown index 6385e919d..a0e22c201 100644 --- a/website/source/docs/providers/azurerm/r/search_service.html.markdown +++ b/website/source/docs/providers/azurerm/r/search_service.html.markdown @@ -12,7 +12,7 @@ Allows you to manage an Azure Search Service ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/servicebus_namespace.html.markdown b/website/source/docs/providers/azurerm/r/servicebus_namespace.html.markdown index 1a58b3de1..70ff65fee 100644 --- a/website/source/docs/providers/azurerm/r/servicebus_namespace.html.markdown +++ b/website/source/docs/providers/azurerm/r/servicebus_namespace.html.markdown @@ -12,7 +12,7 @@ Create a ServiceBus Namespace. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "resourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/servicebus_subscription.html.markdown b/website/source/docs/providers/azurerm/r/servicebus_subscription.html.markdown index 7c58fe339..ea2e72b69 100644 --- a/website/source/docs/providers/azurerm/r/servicebus_subscription.html.markdown +++ b/website/source/docs/providers/azurerm/r/servicebus_subscription.html.markdown @@ -12,7 +12,7 @@ Create a ServiceBus Subscription. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "resourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/servicebus_topic.html.markdown b/website/source/docs/providers/azurerm/r/servicebus_topic.html.markdown index a317f57a9..040518788 100644 --- a/website/source/docs/providers/azurerm/r/servicebus_topic.html.markdown +++ b/website/source/docs/providers/azurerm/r/servicebus_topic.html.markdown @@ -15,7 +15,7 @@ higher. 
## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "resourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/sql_database.html.markdown b/website/source/docs/providers/azurerm/r/sql_database.html.markdown index 1fcbe4038..65df58053 100644 --- a/website/source/docs/providers/azurerm/r/sql_database.html.markdown +++ b/website/source/docs/providers/azurerm/r/sql_database.html.markdown @@ -12,7 +12,7 @@ Allows you to manage an Azure SQL Database ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/sql_firewall_rule.html.markdown b/website/source/docs/providers/azurerm/r/sql_firewall_rule.html.markdown index ffc9bfb29..5d31b1062 100644 --- a/website/source/docs/providers/azurerm/r/sql_firewall_rule.html.markdown +++ b/website/source/docs/providers/azurerm/r/sql_firewall_rule.html.markdown @@ -12,7 +12,7 @@ Allows you to manage an Azure SQL Firewall Rule ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/sql_server.html.markdown b/website/source/docs/providers/azurerm/r/sql_server.html.markdown index 58b4a5fbb..8af90f9dc 100644 --- a/website/source/docs/providers/azurerm/r/sql_server.html.markdown +++ b/website/source/docs/providers/azurerm/r/sql_server.html.markdown @@ -15,7 +15,7 @@ Allows you to manage an Azure SQL Database Server ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/storage_account.html.markdown b/website/source/docs/providers/azurerm/r/storage_account.html.markdown index eb08ba7f1..9b4e13064 100644 --- a/website/source/docs/providers/azurerm/r/storage_account.html.markdown +++ b/website/source/docs/providers/azurerm/r/storage_account.html.markdown @@ -12,7 +12,7 @@ Create an Azure Storage Account. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "testrg" { name = "resourceGroupName" location = "westus" diff --git a/website/source/docs/providers/azurerm/r/storage_blob.html.markdown b/website/source/docs/providers/azurerm/r/storage_blob.html.markdown index 83e22a53f..77e13ea92 100644 --- a/website/source/docs/providers/azurerm/r/storage_blob.html.markdown +++ b/website/source/docs/providers/azurerm/r/storage_blob.html.markdown @@ -12,7 +12,7 @@ Create an Azure Storage Blob. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg-%d" location = "westus" diff --git a/website/source/docs/providers/azurerm/r/storage_container.html.markdown b/website/source/docs/providers/azurerm/r/storage_container.html.markdown index 4ccd7da45..f355bb8ed 100644 --- a/website/source/docs/providers/azurerm/r/storage_container.html.markdown +++ b/website/source/docs/providers/azurerm/r/storage_container.html.markdown @@ -12,7 +12,7 @@ Create an Azure Storage Container. 
## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg" location = "westus" diff --git a/website/source/docs/providers/azurerm/r/storage_queue.html.markdown b/website/source/docs/providers/azurerm/r/storage_queue.html.markdown index 173c6ccb6..6d0e1f573 100644 --- a/website/source/docs/providers/azurerm/r/storage_queue.html.markdown +++ b/website/source/docs/providers/azurerm/r/storage_queue.html.markdown @@ -12,7 +12,7 @@ Create an Azure Storage Queue. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg-%d" location = "westus" diff --git a/website/source/docs/providers/azurerm/r/storage_share.html.markdown b/website/source/docs/providers/azurerm/r/storage_share.html.markdown index f27756210..0e36586c4 100644 --- a/website/source/docs/providers/azurerm/r/storage_share.html.markdown +++ b/website/source/docs/providers/azurerm/r/storage_share.html.markdown @@ -12,7 +12,7 @@ Create an Azure Storage File Share. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg-%d" location = "westus" diff --git a/website/source/docs/providers/azurerm/r/storage_table.html.markdown b/website/source/docs/providers/azurerm/r/storage_table.html.markdown index 4881825c4..ff597cc64 100644 --- a/website/source/docs/providers/azurerm/r/storage_table.html.markdown +++ b/website/source/docs/providers/azurerm/r/storage_table.html.markdown @@ -12,7 +12,7 @@ Create an Azure Storage Table. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg-%d" location = "westus" diff --git a/website/source/docs/providers/azurerm/r/subnet.html.markdown b/website/source/docs/providers/azurerm/r/subnet.html.markdown index b5a8380da..34f4d97ea 100644 --- a/website/source/docs/providers/azurerm/r/subnet.html.markdown +++ b/website/source/docs/providers/azurerm/r/subnet.html.markdown @@ -12,7 +12,7 @@ Creates a new subnet. Subnets represent network segments within the IP space def ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acceptanceTestResourceGroup1" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown b/website/source/docs/providers/azurerm/r/template_deployment.html.markdown index 298286b43..5dbc67762 100644 --- a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown +++ b/website/source/docs/providers/azurerm/r/template_deployment.html.markdown @@ -12,7 +12,7 @@ Create a template deployment of resources ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg-01" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown b/website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown index 8ddf80d24..5808a5cee 100644 --- a/website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown +++ b/website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown @@ -12,7 +12,7 @@ Creates a Traffic Manager Endpoint. 
## Example Usage -``` +```hcl resource "azurerm_traffic_manager_profile" "test" { name = "profile1" resource_group_name = "${azurerm_resource_group.test.name}" diff --git a/website/source/docs/providers/azurerm/r/traffic_manager_profile.html.markdown b/website/source/docs/providers/azurerm/r/traffic_manager_profile.html.markdown index 5d95a4306..d93072a4c 100644 --- a/website/source/docs/providers/azurerm/r/traffic_manager_profile.html.markdown +++ b/website/source/docs/providers/azurerm/r/traffic_manager_profile.html.markdown @@ -12,7 +12,7 @@ Creates a Traffic Manager Profile to which multiple endpoints can be attached. ## Example Usage -``` +```hcl resource "azurerm_traffic_manager_profile" "test" { name = "profile1" resource_group_name = "${azurerm_resource_group.test.name}" diff --git a/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown b/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown index 565a8accb..abf232d2e 100644 --- a/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown @@ -12,7 +12,7 @@ Create a virtual machine. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg" location = "West US" @@ -101,7 +101,7 @@ resource "azurerm_virtual_machine" "test" { ## Example Usage with additional Empty DataDisk -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg" location = "West US" @@ -198,7 +198,7 @@ resource "azurerm_virtual_machine" "test" { ## Example Usage with Managed Disks -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg" location = "West US 2" @@ -410,7 +410,7 @@ For more information on the different example configurations, please check out t * `certificate_url` - (Required) Specifies the URI of the key vault secrets in the format of `https:///secrets//`. Stored secret is the Base64 encoding of a JSON Object that which is encoded in UTF-8 of which the contents need to be -``` +```json { "data":"", "dataType":"pfx", diff --git a/website/source/docs/providers/azurerm/r/virtual_machine_extension.html.markdown b/website/source/docs/providers/azurerm/r/virtual_machine_extension.html.markdown index b61e4fd6b..adbb87142 100644 --- a/website/source/docs/providers/azurerm/r/virtual_machine_extension.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_machine_extension.html.markdown @@ -14,7 +14,7 @@ and run automated tasks. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown b/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown index 559c7f2fc..35cad3503 100644 --- a/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown @@ -15,7 +15,7 @@ Create a virtual machine scale set. 
## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "acctestrg" location = "West US" diff --git a/website/source/docs/providers/azurerm/r/virtual_network.html.markdown b/website/source/docs/providers/azurerm/r/virtual_network.html.markdown index 987cb15e1..697960072 100644 --- a/website/source/docs/providers/azurerm/r/virtual_network.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_network.html.markdown @@ -13,7 +13,7 @@ optionally be configured with a security group to be associated with the subnet. ## Example Usage -``` +```hcl resource "azurerm_virtual_network" "test" { name = "virtualNetwork1" resource_group_name = "${azurerm_resource_group.test.name}" diff --git a/website/source/docs/providers/azurerm/r/virtual_network_peering.html.markdown b/website/source/docs/providers/azurerm/r/virtual_network_peering.html.markdown index 15ddb1788..4376a0ced 100644 --- a/website/source/docs/providers/azurerm/r/virtual_network_peering.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_network_peering.html.markdown @@ -14,7 +14,7 @@ resources in the linked virtual network. ## Example Usage -``` +```hcl resource "azurerm_resource_group" "test" { name = "peeredvnets-rg" location = "West US" diff --git a/website/source/docs/providers/bitbucket/index.html.markdown b/website/source/docs/providers/bitbucket/index.html.markdown index a5b91806f..638a1a6c1 100644 --- a/website/source/docs/providers/bitbucket/index.html.markdown +++ b/website/source/docs/providers/bitbucket/index.html.markdown @@ -15,7 +15,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the Bitbucket Provider provider "bitbucket" { username = "GobBluthe" diff --git a/website/source/docs/providers/bitbucket/r/default_reviewers.html.markdown b/website/source/docs/providers/bitbucket/r/default_reviewers.html.markdown index be2333dd7..810813e89 100644 --- a/website/source/docs/providers/bitbucket/r/default_reviewers.html.markdown +++ b/website/source/docs/providers/bitbucket/r/default_reviewers.html.markdown @@ -12,7 +12,7 @@ Provides support for setting up default reviewers for your repository. ## Example Usage -``` +```hcl # Manage your repository resource "bitbucket_default_reviewers" "infrastructure" { owner = "myteam" diff --git a/website/source/docs/providers/bitbucket/r/hook.html.markdown b/website/source/docs/providers/bitbucket/r/hook.html.markdown index 404759e18..7e5a54d4e 100644 --- a/website/source/docs/providers/bitbucket/r/hook.html.markdown +++ b/website/source/docs/providers/bitbucket/r/hook.html.markdown @@ -14,7 +14,7 @@ This allows you to manage your webhooks on a repository. ## Example Usage -``` +```hcl # Manage your repositories hooks resource "bitbucket_hook" "deploy_on_push" { owner = "myteam" diff --git a/website/source/docs/providers/bitbucket/r/repository.html.markdown b/website/source/docs/providers/bitbucket/r/repository.html.markdown index cc584d9b1..fcff7c53b 100644 --- a/website/source/docs/providers/bitbucket/r/repository.html.markdown +++ b/website/source/docs/providers/bitbucket/r/repository.html.markdown @@ -15,7 +15,7 @@ private, how to fork the repository and other options. 
## Example Usage -``` +```hcl # Manage your repository resource "bitbucket_repository" "infrastructure" { owner = "myteam" diff --git a/website/source/docs/providers/do/index.html.markdown b/website/source/docs/providers/do/index.html.markdown index cc92160dc..1c7571f9f 100644 --- a/website/source/docs/providers/do/index.html.markdown +++ b/website/source/docs/providers/do/index.html.markdown @@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Set the variable value in *.tfvars file # or using -var="do_token=..." CLI option variable "do_token" {} diff --git a/website/source/docs/providers/do/r/domain.html.markdown b/website/source/docs/providers/do/r/domain.html.markdown index 85d3f258c..c90ae150e 100644 --- a/website/source/docs/providers/do/r/domain.html.markdown +++ b/website/source/docs/providers/do/r/domain.html.markdown @@ -12,7 +12,7 @@ Provides a DigitalOcean domain resource. ## Example Usage -``` +```hcl # Create a new domain resource "digitalocean_domain" "default" { name = "www.example.com" diff --git a/website/source/docs/providers/do/r/droplet.html.markdown b/website/source/docs/providers/do/r/droplet.html.markdown index a3b026000..f2fd16c6c 100644 --- a/website/source/docs/providers/do/r/droplet.html.markdown +++ b/website/source/docs/providers/do/r/droplet.html.markdown @@ -14,7 +14,7 @@ modify, and delete Droplets. Droplets also support ## Example Usage -``` +```hcl # Create a new Web Droplet in the nyc2 region resource "digitalocean_droplet" "web" { image = "ubuntu-14-04-x64" diff --git a/website/source/docs/providers/do/r/floating_ip.html.markdown b/website/source/docs/providers/do/r/floating_ip.html.markdown index 0c1a823d8..63196b09d 100644 --- a/website/source/docs/providers/do/r/floating_ip.html.markdown +++ b/website/source/docs/providers/do/r/floating_ip.html.markdown @@ -12,7 +12,7 @@ Provides a DigitalOcean Floating IP to represent a publicly-accessible static IP ## Example Usage -``` +```hcl resource "digitalocean_droplet" "foobar" { name = "baz" size = "1gb" diff --git a/website/source/docs/providers/do/r/loadbalancer.html.markdown b/website/source/docs/providers/do/r/loadbalancer.html.markdown index dd36ff917..ec51ebcad 100644 --- a/website/source/docs/providers/do/r/loadbalancer.html.markdown +++ b/website/source/docs/providers/do/r/loadbalancer.html.markdown @@ -13,7 +13,7 @@ modify, and delete Load Balancers. ## Example Usage -``` +```hcl resource "digitalocean_droplet" "web" { name = "web-1" size = "512mb" diff --git a/website/source/docs/providers/do/r/record.html.markdown b/website/source/docs/providers/do/r/record.html.markdown index d4feb953a..a9ddb50d6 100644 --- a/website/source/docs/providers/do/r/record.html.markdown +++ b/website/source/docs/providers/do/r/record.html.markdown @@ -12,7 +12,7 @@ Provides a DigitalOcean DNS record resource. ## Example Usage -``` +```hcl # Create a new domain resource "digitalocean_domain" "default" { name = "www.example.com" diff --git a/website/source/docs/providers/do/r/ssh_key.html.markdown b/website/source/docs/providers/do/r/ssh_key.html.markdown index e500b5aa1..4d27039fe 100644 --- a/website/source/docs/providers/do/r/ssh_key.html.markdown +++ b/website/source/docs/providers/do/r/ssh_key.html.markdown @@ -15,7 +15,7 @@ fingerprint. 
## Example Usage -``` +```hcl # Create a new SSH key resource "digitalocean_ssh_key" "default" { name = "Terraform Example" diff --git a/website/source/docs/providers/do/r/tag.html.markdown b/website/source/docs/providers/do/r/tag.html.markdown index 42c9b0357..4ec09795a 100644 --- a/website/source/docs/providers/do/r/tag.html.markdown +++ b/website/source/docs/providers/do/r/tag.html.markdown @@ -15,7 +15,7 @@ configuration via their ID or name. ## Example Usage -``` +```hcl # Create a new tag resource "digitalocean_tag" "foobar" { name = "foobar" diff --git a/website/source/docs/providers/do/r/volume.markdown b/website/source/docs/providers/do/r/volume.markdown index 0a25284c9..23ffedfb8 100644 --- a/website/source/docs/providers/do/r/volume.markdown +++ b/website/source/docs/providers/do/r/volume.markdown @@ -12,7 +12,7 @@ Provides a DigitalOcean Block Storage volume which can be attached to a Droplet ## Example Usage -``` +```hcl resource "digitalocean_volume" "foobar" { region = "nyc1" name = "baz" diff --git a/website/source/docs/providers/docker/d/registry_image.html.markdown b/website/source/docs/providers/docker/d/registry_image.html.markdown index 9ec583625..f8d0f8dea 100644 --- a/website/source/docs/providers/docker/d/registry_image.html.markdown +++ b/website/source/docs/providers/docker/d/registry_image.html.markdown @@ -16,7 +16,7 @@ to date on the latest available version of the tag. ## Example Usage -``` +```hcl data "docker_registry_image" "ubuntu" { name = "ubuntu:precise" } diff --git a/website/source/docs/providers/docker/index.html.markdown b/website/source/docs/providers/docker/index.html.markdown index bff8f6140..f55e0973a 100644 --- a/website/source/docs/providers/docker/index.html.markdown +++ b/website/source/docs/providers/docker/index.html.markdown @@ -18,7 +18,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the Docker provider provider "docker" { host = "tcp://127.0.0.1:2376/" diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown index 8a7671591..d0996c992 100644 --- a/website/source/docs/providers/docker/r/container.html.markdown +++ b/website/source/docs/providers/docker/r/container.html.markdown @@ -12,7 +12,7 @@ Manages the lifecycle of a Docker container. ## Example Usage -``` +```hcl # Start a container resource "docker_container" "ubuntu" { name = "foo" @@ -93,7 +93,7 @@ The following arguments are supported: Example: -``` +```hcl resource "docker_container" "ubuntu" { name = "foo" image = "${docker_image.ubuntu.latest}" diff --git a/website/source/docs/providers/docker/r/image.html.markdown b/website/source/docs/providers/docker/r/image.html.markdown index 2520ce3a6..c08f016ea 100644 --- a/website/source/docs/providers/docker/r/image.html.markdown +++ b/website/source/docs/providers/docker/r/image.html.markdown @@ -18,7 +18,7 @@ data source to update the `pull_triggers` field. ## Example Usage -``` +```hcl # Find the latest Ubuntu precise image. 
resource "docker_image" "ubuntu" { name = "ubuntu:precise" @@ -30,7 +30,7 @@ resource "docker_image" "ubuntu" { ### Dynamic image -``` +```hcl data "docker_registry_image" "ubuntu" { name = "ubuntu:precise" } diff --git a/website/source/docs/providers/docker/r/network.html.markdown b/website/source/docs/providers/docker/r/network.html.markdown index d931e0b4e..d0636d499 100644 --- a/website/source/docs/providers/docker/r/network.html.markdown +++ b/website/source/docs/providers/docker/r/network.html.markdown @@ -14,7 +14,7 @@ to create virtual networks within the docker environment. ## Example Usage -``` +```hcl # Find the latest Ubuntu precise image. resource "docker_network" "private_network" { name = "my_network" diff --git a/website/source/docs/providers/docker/r/volume.html.markdown b/website/source/docs/providers/docker/r/volume.html.markdown index e9a31d364..f5d9dee94 100644 --- a/website/source/docs/providers/docker/r/volume.html.markdown +++ b/website/source/docs/providers/docker/r/volume.html.markdown @@ -14,7 +14,7 @@ to prepare volumes that can be shared across containers. ## Example Usage -``` +```hcl # Creates a docker volume "shared_volume". resource "docker_volume" "shared_volume" { name = "shared_volume" diff --git a/website/source/docs/providers/dyn/index.html.markdown b/website/source/docs/providers/dyn/index.html.markdown index f7417e6b6..8811bb7bc 100644 --- a/website/source/docs/providers/dyn/index.html.markdown +++ b/website/source/docs/providers/dyn/index.html.markdown @@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the Dyn provider provider "dyn" { customer_name = "${var.dyn_customer_name}" diff --git a/website/source/docs/providers/dyn/r/record.html.markdown b/website/source/docs/providers/dyn/r/record.html.markdown index 9aaf3f7d6..92cd3b510 100644 --- a/website/source/docs/providers/dyn/r/record.html.markdown +++ b/website/source/docs/providers/dyn/r/record.html.markdown @@ -12,7 +12,7 @@ Provides a Dyn DNS record resource. ## Example Usage -``` +```hcl # Add a record to the domain resource "dyn_record" "foobar" { zone = "${var.dyn_zone}" diff --git a/website/source/docs/providers/google/index.html.markdown b/website/source/docs/providers/google/index.html.markdown index 256a1c940..d0eecd113 100644 --- a/website/source/docs/providers/google/index.html.markdown +++ b/website/source/docs/providers/google/index.html.markdown @@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -```js +```hcl // Configure the Google Cloud provider provider "google" { credentials = "${file("account.json")}" diff --git a/website/source/docs/providers/google/r/compute_address.html.markdown b/website/source/docs/providers/google/r/compute_address.html.markdown index df0c8fad5..8fb5427f9 100644 --- a/website/source/docs/providers/google/r/compute_address.html.markdown +++ b/website/source/docs/providers/google/r/compute_address.html.markdown @@ -15,7 +15,7 @@ Creates a static IP address resource for Google Compute Engine. 
For more informa ## Example Usage -```js +```hcl resource "google_compute_address" "default" { name = "test-address" } diff --git a/website/source/docs/providers/google/r/compute_autoscaler.html.markdown b/website/source/docs/providers/google/r/compute_autoscaler.html.markdown index d2bd362b8..c963e537f 100644 --- a/website/source/docs/providers/google/r/compute_autoscaler.html.markdown +++ b/website/source/docs/providers/google/r/compute_autoscaler.html.markdown @@ -20,7 +20,7 @@ documentation](https://cloud.google.com/compute/docs/autoscaler/) and ## Example Usage -```js +```hcl resource "google_compute_instance_template" "foobar" { name = "foobar" machine_type = "n1-standard-1" diff --git a/website/source/docs/providers/google/r/compute_backend_service.html.markdown b/website/source/docs/providers/google/r/compute_backend_service.html.markdown index a8eaffa0f..551c29883 100644 --- a/website/source/docs/providers/google/r/compute_backend_service.html.markdown +++ b/website/source/docs/providers/google/r/compute_backend_service.html.markdown @@ -12,7 +12,7 @@ A Backend Service defines a group of virtual machines that will serve traffic fo ## Example Usage -```js +```hcl resource "google_compute_backend_service" "foobar" { name = "blablah" description = "Hello World 1234" diff --git a/website/source/docs/providers/google/r/compute_disk.html.markdown b/website/source/docs/providers/google/r/compute_disk.html.markdown index 08e73c23f..ef723cb1b 100644 --- a/website/source/docs/providers/google/r/compute_disk.html.markdown +++ b/website/source/docs/providers/google/r/compute_disk.html.markdown @@ -15,7 +15,7 @@ Creates a new persistent disk within GCE, based on another disk. ## Example Usage -```js +```hcl resource "google_compute_disk" "default" { name = "test-disk" type = "pd-ssd" diff --git a/website/source/docs/providers/google/r/compute_firewall.html.markdown b/website/source/docs/providers/google/r/compute_firewall.html.markdown index c495b3a1c..bc1f93c28 100644 --- a/website/source/docs/providers/google/r/compute_firewall.html.markdown +++ b/website/source/docs/providers/google/r/compute_firewall.html.markdown @@ -12,7 +12,7 @@ Manages a firewall resource within GCE. ## Example Usage -```js +```hcl resource "google_compute_firewall" "default" { name = "test" network = "${google_compute_network.other.name}" diff --git a/website/source/docs/providers/google/r/compute_global_address.html.markdown b/website/source/docs/providers/google/r/compute_global_address.html.markdown index dbfab2937..3b7f4de2b 100644 --- a/website/source/docs/providers/google/r/compute_global_address.html.markdown +++ b/website/source/docs/providers/google/r/compute_global_address.html.markdown @@ -15,7 +15,7 @@ Creates a static IP address resource global to a Google Compute Engine project. 
## Example Usage -```js +```hcl resource "google_compute_global_address" "default" { name = "test-address" } diff --git a/website/source/docs/providers/google/r/compute_global_forwarding_rule.html.markdown b/website/source/docs/providers/google/r/compute_global_forwarding_rule.html.markdown index 62d1d06ce..df04a6935 100644 --- a/website/source/docs/providers/google/r/compute_global_forwarding_rule.html.markdown +++ b/website/source/docs/providers/google/r/compute_global_forwarding_rule.html.markdown @@ -15,7 +15,7 @@ documentation](https://cloud.google.com/compute/docs/load-balancing/http/global- ## Example Usage -```js +```hcl resource "google_compute_global_forwarding_rule" "default" { name = "test" target = "${google_compute_target_http_proxy.default.self_link}" diff --git a/website/source/docs/providers/google/r/compute_http_health_check.html.markdown b/website/source/docs/providers/google/r/compute_http_health_check.html.markdown index 230386555..c5ee13755 100644 --- a/website/source/docs/providers/google/r/compute_http_health_check.html.markdown +++ b/website/source/docs/providers/google/r/compute_http_health_check.html.markdown @@ -17,7 +17,7 @@ and ## Example Usage -```js +```hcl resource "google_compute_http_health_check" "default" { name = "test" request_path = "/health_check" diff --git a/website/source/docs/providers/google/r/compute_https_health_check.html.markdown b/website/source/docs/providers/google/r/compute_https_health_check.html.markdown index e04054bcc..79bb102ce 100644 --- a/website/source/docs/providers/google/r/compute_https_health_check.html.markdown +++ b/website/source/docs/providers/google/r/compute_https_health_check.html.markdown @@ -17,7 +17,7 @@ and ## Example Usage -```js +```hcl resource "google_compute_https_health_check" "default" { name = "test" request_path = "/health_check" diff --git a/website/source/docs/providers/google/r/compute_image.html.markdown b/website/source/docs/providers/google/r/compute_image.html.markdown index 3983d9817..fe5bea7af 100644 --- a/website/source/docs/providers/google/r/compute_image.html.markdown +++ b/website/source/docs/providers/google/r/compute_image.html.markdown @@ -15,7 +15,7 @@ tarball. 
For more information see [the official documentation](https://cloud.goo ## Example Usage -```js +```hcl resource "google_compute_image" "bootable-image" { name = "my-custom-image" diff --git a/website/source/docs/providers/google/r/compute_instance.html.markdown b/website/source/docs/providers/google/r/compute_instance.html.markdown index 3481d4009..3ed964a13 100644 --- a/website/source/docs/providers/google/r/compute_instance.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance.html.markdown @@ -16,7 +16,7 @@ and ## Example Usage -```js +```hcl resource "google_compute_instance" "default" { name = "test" machine_type = "n1-standard-1" diff --git a/website/source/docs/providers/google/r/compute_instance_group.html.markdown b/website/source/docs/providers/google/r/compute_instance_group.html.markdown index 2fe520621..b7ed3db1e 100644 --- a/website/source/docs/providers/google/r/compute_instance_group.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance_group.html.markdown @@ -16,7 +16,8 @@ and [API](https://cloud.google.com/compute/docs/reference/latest/instanceGroups) ## Example Usage ### Empty instance group -```js + +```hcl resource "google_compute_instance_group" "test" { name = "terraform-test" description = "Terraform test instance group" @@ -25,7 +26,8 @@ resource "google_compute_instance_group" "test" { ``` ### With instances and named ports -```js + +```hcl resource "google_compute_instance_group" "webservers" { name = "terraform-webservers" description = "Terraform test instance group" diff --git a/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown b/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown index 610263ac3..b7d8c6cd9 100644 --- a/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown @@ -15,7 +15,7 @@ and [API](https://cloud.google.com/compute/docs/instance-groups/manager/v1beta2/ ## Example Usage -```js +```hcl resource "google_compute_instance_group_manager" "foobar" { name = "terraform-test" description = "Terraform test instance group manager" diff --git a/website/source/docs/providers/google/r/compute_instance_template.html.markdown b/website/source/docs/providers/google/r/compute_instance_template.html.markdown index 5670409b7..78bd47bca 100644 --- a/website/source/docs/providers/google/r/compute_instance_template.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance_template.html.markdown @@ -17,7 +17,7 @@ and ## Example Usage -```js +```hcl resource "google_compute_instance_template" "foobar" { name = "terraform-test" description = "template description" @@ -71,7 +71,7 @@ it's recommended to specify `create_before_destroy` in a [lifecycle][2] block. Either omit the Instance Template `name` attribute, or specify a partial name with `name_prefix`. Example: -``` +```hcl resource "google_compute_instance_template" "instance_template" { name_prefix = "instance-template-" machine_type = "n1-standard-1" diff --git a/website/source/docs/providers/google/r/compute_network.html.markdown b/website/source/docs/providers/google/r/compute_network.html.markdown index cd03ad2bb..0146dc8a9 100644 --- a/website/source/docs/providers/google/r/compute_network.html.markdown +++ b/website/source/docs/providers/google/r/compute_network.html.markdown @@ -12,7 +12,7 @@ Manages a network within GCE. 
## Example Usage -```js +```hcl resource "google_compute_network" "default" { name = "test" auto_create_subnetworks = "true" diff --git a/website/source/docs/providers/google/r/compute_project_metadata.html.markdown b/website/source/docs/providers/google/r/compute_project_metadata.html.markdown index dfe9c2501..3b7dd19df 100644 --- a/website/source/docs/providers/google/r/compute_project_metadata.html.markdown +++ b/website/source/docs/providers/google/r/compute_project_metadata.html.markdown @@ -12,7 +12,7 @@ Manages metadata common to all instances for a project in GCE. ## Example Usage -``` +```hcl resource "google_compute_project_metadata" "default" { metadata { foo = "bar" diff --git a/website/source/docs/providers/google/r/compute_route.html.markdown b/website/source/docs/providers/google/r/compute_route.html.markdown index d7d9fbe71..c44566209 100644 --- a/website/source/docs/providers/google/r/compute_route.html.markdown +++ b/website/source/docs/providers/google/r/compute_route.html.markdown @@ -12,7 +12,7 @@ Manages a network route within GCE. ## Example Usage -```js +```hcl resource "google_compute_network" "foobar" { name = "test" ipv4_range = "10.0.0.0/16" diff --git a/website/source/docs/providers/google/r/compute_ssl_certificate.html.markdown b/website/source/docs/providers/google/r/compute_ssl_certificate.html.markdown index 63437bbed..48d94742a 100644 --- a/website/source/docs/providers/google/r/compute_ssl_certificate.html.markdown +++ b/website/source/docs/providers/google/r/compute_ssl_certificate.html.markdown @@ -16,7 +16,7 @@ For more information see ## Example Usage -```js +```hcl resource "google_compute_ssl_certificate" "default" { name_prefix = "my-certificate-" description = "a description" diff --git a/website/source/docs/providers/google/r/compute_subnetwork.html.markdown b/website/source/docs/providers/google/r/compute_subnetwork.html.markdown index 81f7c1387..2879f8a79 100644 --- a/website/source/docs/providers/google/r/compute_subnetwork.html.markdown +++ b/website/source/docs/providers/google/r/compute_subnetwork.html.markdown @@ -12,7 +12,7 @@ Manages a subnetwork within GCE. 
## Example Usage -```js +```hcl resource "google_compute_subnetwork" "default-us-east1" { name = "default-us-east1" ip_cidr_range = "10.0.0.0/16" diff --git a/website/source/docs/providers/google/r/compute_target_http_proxy.html.markdown b/website/source/docs/providers/google/r/compute_target_http_proxy.html.markdown index 18071e416..dc23841bf 100644 --- a/website/source/docs/providers/google/r/compute_target_http_proxy.html.markdown +++ b/website/source/docs/providers/google/r/compute_target_http_proxy.html.markdown @@ -16,7 +16,7 @@ documentation](https://cloud.google.com/compute/docs/load-balancing/http/target- ## Example Usage -```js +```hcl resource "google_compute_target_http_proxy" "default" { name = "test-proxy" description = "a description" diff --git a/website/source/docs/providers/google/r/compute_target_https_proxy.html.markdown b/website/source/docs/providers/google/r/compute_target_https_proxy.html.markdown index 932c71b7b..c8c2e398e 100644 --- a/website/source/docs/providers/google/r/compute_target_https_proxy.html.markdown +++ b/website/source/docs/providers/google/r/compute_target_https_proxy.html.markdown @@ -16,7 +16,7 @@ documentation](https://cloud.google.com/compute/docs/load-balancing/http/target- ## Example Usage -```js +```hcl resource "google_compute_target_https_proxy" "default" { name = "test-proxy" description = "a description" diff --git a/website/source/docs/providers/google/r/compute_target_pool.html.markdown b/website/source/docs/providers/google/r/compute_target_pool.html.markdown index 0192c7a72..72a3ce153 100644 --- a/website/source/docs/providers/google/r/compute_target_pool.html.markdown +++ b/website/source/docs/providers/google/r/compute_target_pool.html.markdown @@ -17,7 +17,7 @@ and [API](https://cloud.google.com/compute/docs/reference/latest/targetPools). ## Example Usage -```js +```hcl resource "google_compute_target_pool" "default" { name = "test" diff --git a/website/source/docs/providers/google/r/compute_url_map.html.markdown b/website/source/docs/providers/google/r/compute_url_map.html.markdown index c3e127963..faad2a1ee 100644 --- a/website/source/docs/providers/google/r/compute_url_map.html.markdown +++ b/website/source/docs/providers/google/r/compute_url_map.html.markdown @@ -16,7 +16,7 @@ and ## Example Usage -```js +```hcl resource "google_compute_url_map" "foobar" { name = "urlmap" description = "a description" diff --git a/website/source/docs/providers/google/r/compute_vpn_gateway.html.markdown b/website/source/docs/providers/google/r/compute_vpn_gateway.html.markdown index 0c511b554..c0f100a03 100644 --- a/website/source/docs/providers/google/r/compute_vpn_gateway.html.markdown +++ b/website/source/docs/providers/google/r/compute_vpn_gateway.html.markdown @@ -14,7 +14,7 @@ Manages a VPN Gateway in the GCE network. For more info, read the ## Example Usage -```js +```hcl resource "google_compute_network" "network1" { name = "network1" ipv4_range = "10.120.0.0/16" diff --git a/website/source/docs/providers/google/r/compute_vpn_tunnel.html.markdown b/website/source/docs/providers/google/r/compute_vpn_tunnel.html.markdown index a3673273c..059527882 100644 --- a/website/source/docs/providers/google/r/compute_vpn_tunnel.html.markdown +++ b/website/source/docs/providers/google/r/compute_vpn_tunnel.html.markdown @@ -13,7 +13,7 @@ Manages a VPN Tunnel to the GCE network. 
For more info, read the ## Example Usage -```js +```hcl resource "google_compute_network" "network1" { name = "network1" } diff --git a/website/source/docs/providers/google/r/container_cluster.html.markdown b/website/source/docs/providers/google/r/container_cluster.html.markdown index 7679f1fd2..b72d5b822 100644 --- a/website/source/docs/providers/google/r/container_cluster.html.markdown +++ b/website/source/docs/providers/google/r/container_cluster.html.markdown @@ -17,7 +17,7 @@ whole cluster! ## Example usage -```js +```hcl resource "google_container_cluster" "primary" { name = "marcellus-wallace" zone = "us-central1-a" diff --git a/website/source/docs/providers/google/r/container_node_pool.html.markdown b/website/source/docs/providers/google/r/container_node_pool.html.markdown index 12a24cbc7..386d912ee 100644 --- a/website/source/docs/providers/google/r/container_node_pool.html.markdown +++ b/website/source/docs/providers/google/r/container_node_pool.html.markdown @@ -15,7 +15,7 @@ and ## Example usage -```tf +```hcl resource "google_container_node_pool" "np" { name = "my-node-pool" zone = "us-central1-a" diff --git a/website/source/docs/providers/google/r/dns_managed_zone.markdown b/website/source/docs/providers/google/r/dns_managed_zone.markdown index 25d227ddb..5f5bc95ad 100644 --- a/website/source/docs/providers/google/r/dns_managed_zone.markdown +++ b/website/source/docs/providers/google/r/dns_managed_zone.markdown @@ -12,7 +12,7 @@ Manages a zone within Google Cloud DNS. ## Example Usage -```js +```hcl resource "google_dns_managed_zone" "prod" { name = "prod-zone" dns_name = "prod.mydomain.com." diff --git a/website/source/docs/providers/google/r/dns_record_set.markdown b/website/source/docs/providers/google/r/dns_record_set.markdown index 1f4b39d22..dcf33371c 100644 --- a/website/source/docs/providers/google/r/dns_record_set.markdown +++ b/website/source/docs/providers/google/r/dns_record_set.markdown @@ -14,7 +14,7 @@ Manages a set of DNS records within Google Cloud DNS. This example is the common case of binding a DNS name to the ephemeral IP of a new instance: -```js +```hcl resource "google_compute_instance" "frontend" { name = "frontend" machine_type = "g1-small" diff --git a/website/source/docs/providers/google/r/google_project.html.markdown b/website/source/docs/providers/google/r/google_project.html.markdown index 6fb0e7c7f..4e8693919 100755 --- a/website/source/docs/providers/google/r/google_project.html.markdown +++ b/website/source/docs/providers/google/r/google_project.html.markdown @@ -39,7 +39,7 @@ Terraform. Only newly added projects are affected. ## Example Usage -```js +```hcl resource "google_project" "my_project" { project_id = "your-project-id" org_id = "1234567" diff --git a/website/source/docs/providers/google/r/google_project_iam_policy.html.markdown b/website/source/docs/providers/google/r/google_project_iam_policy.html.markdown index dcc9d87b7..4dbc8a625 100644 --- a/website/source/docs/providers/google/r/google_project_iam_policy.html.markdown +++ b/website/source/docs/providers/google/r/google_project_iam_policy.html.markdown @@ -16,7 +16,7 @@ Platform project. 
## Example Usage -```js +```hcl resource "google_project_iam_policy" "project" { project = "your-project-id" policy_data = "${data.google_iam_policy.admin.policy_data}" diff --git a/website/source/docs/providers/google/r/google_project_services.html.markdown b/website/source/docs/providers/google/r/google_project_services.html.markdown index d6d2eff13..0637816a0 100644 --- a/website/source/docs/providers/google/r/google_project_services.html.markdown +++ b/website/source/docs/providers/google/r/google_project_services.html.markdown @@ -14,7 +14,7 @@ in the config will be removed. ## Example Usage -```js +```hcl resource "google_project_services" "project" { project = "your-project-id" services = ["iam.googleapis.com", "cloudresourcemanager.googleapis.com"] diff --git a/website/source/docs/providers/google/r/google_service_account.html.markdown b/website/source/docs/providers/google/r/google_service_account.html.markdown index 7c0c6350d..c3cd1c899 100644 --- a/website/source/docs/providers/google/r/google_service_account.html.markdown +++ b/website/source/docs/providers/google/r/google_service_account.html.markdown @@ -15,7 +15,7 @@ Allows management of a [Google Cloud Platform service account](https://cloud.goo This snippet creates a service account, then gives it objectViewer permission in a project. -```js +```hcl resource "google_service_account" "object_viewer" { account_id = "object-viewer" display_name = "Object viewer" diff --git a/website/source/docs/providers/google/r/pubsub_subscription.html.markdown b/website/source/docs/providers/google/r/pubsub_subscription.html.markdown index d5b7aed18..ce375b12d 100644 --- a/website/source/docs/providers/google/r/pubsub_subscription.html.markdown +++ b/website/source/docs/providers/google/r/pubsub_subscription.html.markdown @@ -15,7 +15,7 @@ Creates a subscription in Google's pubsub queueing system. For more information ## Example Usage -```js +```hcl resource "google_pubsub_subscription" "default" { name = "default-subscription" topic = "default-topic" diff --git a/website/source/docs/providers/google/r/pubsub_topic.html.markdown b/website/source/docs/providers/google/r/pubsub_topic.html.markdown index 3f794e435..ceb3aebc2 100644 --- a/website/source/docs/providers/google/r/pubsub_topic.html.markdown +++ b/website/source/docs/providers/google/r/pubsub_topic.html.markdown @@ -15,7 +15,7 @@ Creates a topic in Google's pubsub queueing system. For more information see ## Example Usage -```js +```hcl resource "google_pubsub_topic" "default" { name = "default-topic" } diff --git a/website/source/docs/providers/google/r/sql_database.html.markdown b/website/source/docs/providers/google/r/sql_database.html.markdown index 8d6958c8f..d4c6b9802 100644 --- a/website/source/docs/providers/google/r/sql_database.html.markdown +++ b/website/source/docs/providers/google/r/sql_database.html.markdown @@ -14,7 +14,7 @@ Creates a new Google SQL Database on a Google SQL Database Instance. For more in Example creating a SQL Database. -```js +```hcl resource "google_sql_database_instance" "master" { name = "master-instance" diff --git a/website/source/docs/providers/google/r/sql_database_instance.html.markdown b/website/source/docs/providers/google/r/sql_database_instance.html.markdown index 88535670e..cb96a7234 100644 --- a/website/source/docs/providers/google/r/sql_database_instance.html.markdown +++ b/website/source/docs/providers/google/r/sql_database_instance.html.markdown @@ -20,7 +20,7 @@ a restricted host and strong password. 
Example creating a SQL Database.

-```js
+```hcl
 resource "google_sql_database_instance" "master" {
   name = "master-instance"

diff --git a/website/source/docs/providers/google/r/sql_user.html.markdown b/website/source/docs/providers/google/r/sql_user.html.markdown
index 7e4937d8e..a486a6b9e 100644
--- a/website/source/docs/providers/google/r/sql_user.html.markdown
+++ b/website/source/docs/providers/google/r/sql_user.html.markdown
@@ -17,7 +17,7 @@ Creates a new Google SQL User on a Google SQL User Instance. For more informatio

 Example creating a SQL User.

-```js
+```hcl
 resource "google_sql_database_instance" "master" {
   name = "master-instance"

diff --git a/website/source/docs/providers/google/r/storage_bucket.html.markdown b/website/source/docs/providers/google/r/storage_bucket.html.markdown
index dbd482c6c..08c9b9972 100644
--- a/website/source/docs/providers/google/r/storage_bucket.html.markdown
+++ b/website/source/docs/providers/google/r/storage_bucket.html.markdown
@@ -15,7 +15,7 @@ Creates a new bucket in Google cloud storage service (GCS). Currently, it will no

 Example creating a private bucket in standard storage, in the EU region.

-```js
+```hcl
 resource "google_storage_bucket" "image-store" {
   name = "image-store-bucket"
   location = "EU"
diff --git a/website/source/docs/providers/google/r/storage_bucket_acl.html.markdown b/website/source/docs/providers/google/r/storage_bucket_acl.html.markdown
index 5e5117557..dc8daa22e 100644
--- a/website/source/docs/providers/google/r/storage_bucket_acl.html.markdown
+++ b/website/source/docs/providers/google/r/storage_bucket_acl.html.markdown
@@ -14,7 +14,7 @@ Creates a new bucket ACL in Google cloud storage service (GCS).

 Example creating an ACL on a bucket with one owner, and one reader.

-```js
+```hcl
 resource "google_storage_bucket" "image-store" {
   name = "image-store-bucket"
   location = "EU"
diff --git a/website/source/docs/providers/google/r/storage_bucket_object.html.markdown b/website/source/docs/providers/google/r/storage_bucket_object.html.markdown
index 20092b0ee..49f7e24e0 100644
--- a/website/source/docs/providers/google/r/storage_bucket_object.html.markdown
+++ b/website/source/docs/providers/google/r/storage_bucket_object.html.markdown
@@ -15,7 +15,7 @@ Creates a new object inside an existing bucket in Google cloud storage service

 Example creating a public object in an existing `image-store` bucket.

-```js
+```hcl
 resource "google_storage_bucket_object" "picture" {
   name = "butterfly01"
   source = "/images/nature/garden-tiger-moth.jpg"
diff --git a/website/source/docs/providers/google/r/storage_object_acl.html.markdown b/website/source/docs/providers/google/r/storage_object_acl.html.markdown
index 1b27da3f4..63f69dc08 100644
--- a/website/source/docs/providers/google/r/storage_object_acl.html.markdown
+++ b/website/source/docs/providers/google/r/storage_object_acl.html.markdown
@@ -14,7 +14,7 @@ Creates a new object ACL in Google cloud storage service (GCS)

 Create an object ACL with one owner and one reader.

-```js
+```hcl
 resource "google_storage_bucket" "image-store" {
   name = "image-store-bucket"
   location = "EU"
diff --git a/website/source/docs/providers/grafana/index.html.markdown b/website/source/docs/providers/grafana/index.html.markdown
index 09d4d0cd2..afecda124 100644
--- a/website/source/docs/providers/grafana/index.html.markdown
+++ b/website/source/docs/providers/grafana/index.html.markdown
@@ -26,7 +26,7 @@ Use the navigation to the left to read about the available resources.
## Example Usage

-```
+```hcl
 provider "grafana" {
   url = "http://grafana.example.com/"
   auth = "1234abcd"
diff --git a/website/source/docs/providers/grafana/r/dashboard.html.md b/website/source/docs/providers/grafana/r/dashboard.html.md
index d00fc17f8..b37a69e7f 100644
--- a/website/source/docs/providers/grafana/r/dashboard.html.md
+++ b/website/source/docs/providers/grafana/r/dashboard.html.md
@@ -12,7 +12,7 @@ The dashboard resource allows a dashboard to be created on a Grafana server.

 ## Example Usage

-```
+```hcl
 resource "grafana_dashboard" "metrics" {
   config_json = "${file("grafana-dashboard.json")}"
 }
@@ -28,7 +28,7 @@ using the `grafana_data_source` resource.

 In order to ensure that a data source is created before a dashboard that
 refers to it, use the `depends_on` meta-parameter:

-```
+```hcl
 depends_on = ["grafana_data_source.metrics"]
 ```

diff --git a/website/source/docs/providers/grafana/r/data_source.html.md b/website/source/docs/providers/grafana/r/data_source.html.md
index 182e5690e..aa3df4f8c 100644
--- a/website/source/docs/providers/grafana/r/data_source.html.md
+++ b/website/source/docs/providers/grafana/r/data_source.html.md
@@ -18,7 +18,7 @@ InfluxDB.

 See [Grafana's *Data Sources Guides*](http://docs.grafana.org/#data-sources-guides)
 for more details on the supported data source types and the arguments they use.

-```
+```hcl
 resource "grafana_data_source" "metrics" {
   type = "influxdb"
   name = "myapp-metrics"
diff --git a/website/source/docs/providers/heroku/index.html.markdown b/website/source/docs/providers/heroku/index.html.markdown
index b72c1dfcf..2bec7aa85 100644
--- a/website/source/docs/providers/heroku/index.html.markdown
+++ b/website/source/docs/providers/heroku/index.html.markdown
@@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources.

 ## Example Usage

-```
+```hcl
 # Configure the Heroku provider
 provider "heroku" {
   email = "ops@company.com"
diff --git a/website/source/docs/providers/heroku/r/addon.html.markdown b/website/source/docs/providers/heroku/r/addon.html.markdown
index 4720da83f..cfe4931b4 100644
--- a/website/source/docs/providers/heroku/r/addon.html.markdown
+++ b/website/source/docs/providers/heroku/r/addon.html.markdown
@@ -13,7 +13,7 @@ services to a Heroku app.

 ## Example Usage

-```
+```hcl
 # Create a new Heroku app
 resource "heroku_app" "default" {
   name = "test-app"
diff --git a/website/source/docs/providers/heroku/r/app.html.markdown b/website/source/docs/providers/heroku/r/app.html.markdown
index 6f9143d34..410f01ead 100644
--- a/website/source/docs/providers/heroku/r/app.html.markdown
+++ b/website/source/docs/providers/heroku/r/app.html.markdown
@@ -13,7 +13,7 @@ create and manage applications on Heroku.

 ## Example Usage

-```
+```hcl
 # Create a new Heroku app
 resource "heroku_app" "default" {
   name = "my-cool-app"
diff --git a/website/source/docs/providers/heroku/r/cert.html.markdown b/website/source/docs/providers/heroku/r/cert.html.markdown
index dbe2aab96..0a2f8a530 100644
--- a/website/source/docs/providers/heroku/r/cert.html.markdown
+++ b/website/source/docs/providers/heroku/r/cert.html.markdown
@@ -12,7 +12,7 @@ Provides a Heroku SSL certificate resource.
It allows setting a given certificate

## Example Usage

-```
+```hcl
 # Create a new Heroku app
 resource "heroku_app" "default" {
   name = "test-app"
diff --git a/website/source/docs/providers/heroku/r/domain.html.markdown b/website/source/docs/providers/heroku/r/domain.html.markdown
index ba4915eaf..451c3f411 100644
--- a/website/source/docs/providers/heroku/r/domain.html.markdown
+++ b/website/source/docs/providers/heroku/r/domain.html.markdown
@@ -13,7 +13,7 @@ create and manage applications on Heroku.

 ## Example Usage

-```
+```hcl
 # Create a new Heroku app
 resource "heroku_app" "default" {
   name = "test-app"
diff --git a/website/source/docs/providers/heroku/r/drain.html.markdown b/website/source/docs/providers/heroku/r/drain.html.markdown
index 662843bdf..a91fced45 100644
--- a/website/source/docs/providers/heroku/r/drain.html.markdown
+++ b/website/source/docs/providers/heroku/r/drain.html.markdown
@@ -13,7 +13,7 @@ create and manage Log Drains on Heroku.

 ## Example Usage

-```
+```hcl
 resource "heroku_drain" "default" {
   app = "test-app"
   url = "syslog://terraform.example.com:1234"
diff --git a/website/source/docs/providers/icinga2/index.html.markdown b/website/source/docs/providers/icinga2/index.html.markdown
index e5103813c..9c8850dee 100644
--- a/website/source/docs/providers/icinga2/index.html.markdown
+++ b/website/source/docs/providers/icinga2/index.html.markdown
@@ -16,7 +16,7 @@ for an API user with the appropriate permissions.

 ## Example Usage

-```
+```hcl
 # Configure the Icinga2 provider
 provider "icinga2" {
   api_url = "https://192.168.33.5:5665/v1"
@@ -40,7 +40,7 @@ icinga2 provider block:

 Usage:

-```
+```hcl
 provider "icinga2" {
   api_url = "https://192.168.33.5:5665/v1"
   api_user = "root"
@@ -55,13 +55,13 @@ You can provide your credentials via `ICINGA2_API_USER` and `ICINGA2_API_PASSWOR
 environment variables, storing your Icinga2 API user and password, respectively.
 `ICINGA2_API_URL` and `ICINGA2_INSECURE_SKIP_TLS_VERIFY` are also used, if
 applicable:

-```
+```hcl
 provider "icinga" {}
 ```

 Usage:

-```
+```shell
 $ export ICINGA2_API_URL=https://192.168.33.5:5665/v1
 $ export ICINGA2_API_USER=root
 $ export ICINGA2_API_PASSWORD=icinga
diff --git a/website/source/docs/providers/icinga2/r/checkcommands.html.markdown b/website/source/docs/providers/icinga2/r/checkcommands.html.markdown
index 98c830f6a..1055bb6eb 100644
--- a/website/source/docs/providers/icinga2/r/checkcommands.html.markdown
+++ b/website/source/docs/providers/icinga2/r/checkcommands.html.markdown
@@ -13,7 +13,7 @@ and deleted.

 ## Example Usage

-```
+```hcl
 # Configure a new checkcommand on an Icinga2 Server, that can be used to monitor hosts and/or services
 provider "icinga2" {
   api_url = "https://192.168.33.5:5665/v1"
diff --git a/website/source/docs/providers/icinga2/r/host.html.markdown b/website/source/docs/providers/icinga2/r/host.html.markdown
index 9b3ca962a..1cdc7c103 100644
--- a/website/source/docs/providers/icinga2/r/host.html.markdown
+++ b/website/source/docs/providers/icinga2/r/host.html.markdown
@@ -13,7 +13,7 @@ and deleted.
## Example Usage -``` +```hcl # Configure a new host to be monitored by an Icinga2 Server provider "icinga2" { api_url = "https://192.168.33.5:5665/v1" diff --git a/website/source/docs/providers/icinga2/r/hostgroup.html.markdown b/website/source/docs/providers/icinga2/r/hostgroup.html.markdown index 58684373c..ce9578f7e 100644 --- a/website/source/docs/providers/icinga2/r/hostgroup.html.markdown +++ b/website/source/docs/providers/icinga2/r/hostgroup.html.markdown @@ -13,7 +13,7 @@ and deleted. ## Example Usage -``` +```hcl # Configure a new hostgroup to be monitored by an Icinga2 Server provider "icinga2" { api_url = "https://192.168.33.5:5665/v1" diff --git a/website/source/docs/providers/icinga2/r/service.html.markdown b/website/source/docs/providers/icinga2/r/service.html.markdown index 13f654bae..968d8cef0 100644 --- a/website/source/docs/providers/icinga2/r/service.html.markdown +++ b/website/source/docs/providers/icinga2/r/service.html.markdown @@ -13,7 +13,7 @@ and deleted. ## Example Usage -``` +```hcl # Configure a new service to be monitored by an Icinga2 Server provider "icinga2" { api_url = "https://192.168.33.5:5665/v1" diff --git a/website/source/docs/providers/ignition/d/config.html.md b/website/source/docs/providers/ignition/d/config.html.md index d565758ff..bbf68bee7 100644 --- a/website/source/docs/providers/ignition/d/config.html.md +++ b/website/source/docs/providers/ignition/d/config.html.md @@ -12,7 +12,7 @@ Renders an ignition configuration as JSON. It contains all the disks, partition ## Example Usage -``` +```hcl data "ignition_config" "example" { systemd = [ "${data.ignition_systemd_unit.example.id}", diff --git a/website/source/docs/providers/ignition/d/disk.html.md b/website/source/docs/providers/ignition/d/disk.html.md index 2fc64ffc2..c061ceeb0 100644 --- a/website/source/docs/providers/ignition/d/disk.html.md +++ b/website/source/docs/providers/ignition/d/disk.html.md @@ -12,7 +12,7 @@ Describes the desired state of a system’s disk. ## Example Usage -``` +```hcl data "ignition_disk" "foo" { device = "/dev/sda" partition { diff --git a/website/source/docs/providers/ignition/d/file.html.md b/website/source/docs/providers/ignition/d/file.html.md index 9628730f2..b851bbbcd 100644 --- a/website/source/docs/providers/ignition/d/file.html.md +++ b/website/source/docs/providers/ignition/d/file.html.md @@ -14,8 +14,7 @@ Describes a file to be written in a particular filesystem. 
File with inline content:

-
-```
+```hcl
 data "ignition_file" "hello" {
   filesystem = "foo"
   path = "/hello.txt"
@@ -27,7 +26,7 @@ data "ignition_file" "hello" {

 File with remote content:

-```
+```hcl
 data "ignition_file" "hello" {
   filesystem = "qux"
   path = "/hello.txt"
diff --git a/website/source/docs/providers/ignition/d/filesystem.html.md b/website/source/docs/providers/ignition/d/filesystem.html.md
index b8ed0eaf4..68e66f207 100644
--- a/website/source/docs/providers/ignition/d/filesystem.html.md
+++ b/website/source/docs/providers/ignition/d/filesystem.html.md
@@ -12,7 +12,7 @@ Describes the desired state of the system’s filesystems to be configured and

 ## Example Usage

-```
+```hcl
 data "ignition_filesystem" "foo" {
   name = "root"
   mount {
diff --git a/website/source/docs/providers/ignition/d/group.html.md b/website/source/docs/providers/ignition/d/group.html.md
index ba360507a..807e72dd6 100644
--- a/website/source/docs/providers/ignition/d/group.html.md
+++ b/website/source/docs/providers/ignition/d/group.html.md
@@ -12,7 +12,7 @@ Describes the desired group additions to the passwd database.

 ## Example Usage

-```
+```hcl
 data "ignition_group" "foo" {
   name = "foo"
 }
diff --git a/website/source/docs/providers/ignition/d/networkd_unit.html.md b/website/source/docs/providers/ignition/d/networkd_unit.html.md
index a60b5e67c..bf45e345b 100644
--- a/website/source/docs/providers/ignition/d/networkd_unit.html.md
+++ b/website/source/docs/providers/ignition/d/networkd_unit.html.md
@@ -12,7 +12,7 @@ Describes the desired state of the networkd units.

 ## Example Usage

-```
+```hcl
 data "ignition_networkd_unit" "example" {
   name = "00-eth0.network"
   content = "[Match]\nName=eth0\n\n[Network]\nAddress=10.0.1.7"
diff --git a/website/source/docs/providers/ignition/d/raid.html.md b/website/source/docs/providers/ignition/d/raid.html.md
index 4cb32da0d..47219a251 100644
--- a/website/source/docs/providers/ignition/d/raid.html.md
+++ b/website/source/docs/providers/ignition/d/raid.html.md
@@ -12,7 +12,7 @@ Describes the desired state of the system’s RAID.

 ## Example Usage

-```
+```hcl
 data "ignition_raid" "md" {
   name = "data"
   level = "stripe"
diff --git a/website/source/docs/providers/ignition/d/systemd_unit.html.md b/website/source/docs/providers/ignition/d/systemd_unit.html.md
index 58502caac..a97e32147 100644
--- a/website/source/docs/providers/ignition/d/systemd_unit.html.md
+++ b/website/source/docs/providers/ignition/d/systemd_unit.html.md
@@ -12,7 +12,7 @@ Describes the desired state of the systemd units.

 ## Example Usage

-```
+```hcl
 data "ignition_systemd_unit" "example" {
   name = "example.service"
   content = "[Service]\nType=oneshot\nExecStart=/usr/bin/echo Hello World\n\n[Install]\nWantedBy=multi-user.target"
diff --git a/website/source/docs/providers/ignition/d/user.html.md b/website/source/docs/providers/ignition/d/user.html.md
index 5c0ec8f8d..c4975a739 100644
--- a/website/source/docs/providers/ignition/d/user.html.md
+++ b/website/source/docs/providers/ignition/d/user.html.md
@@ -12,7 +12,7 @@ Describes the desired user additions to the passwd database.
## Example Usage -``` +```hcl data "ignition_user" "foo" { name = "foo" home_dir = "/home/foo/" diff --git a/website/source/docs/providers/ignition/index.html.markdown b/website/source/docs/providers/ignition/index.html.markdown index 5b26fc075..25d067470 100644 --- a/website/source/docs/providers/ignition/index.html.markdown +++ b/website/source/docs/providers/ignition/index.html.markdown @@ -18,7 +18,7 @@ Use the navigation to the left to read about the available resources. This config will write a single service unit (shown below) with the contents of an example service. This unit will be enabled as a dependency of multi-user.target and therefore start on boot -``` +```hcl # Systemd unit data resource containing the unit definition data "ignition_systemd_unit" "example" { name = "example.service" diff --git a/website/source/docs/providers/influxdb/index.html.markdown b/website/source/docs/providers/influxdb/index.html.markdown index 45400b231..4b500ac02 100644 --- a/website/source/docs/providers/influxdb/index.html.markdown +++ b/website/source/docs/providers/influxdb/index.html.markdown @@ -28,7 +28,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl provider "influxdb" { url = "http://influxdb.example.com/" username = "terraform" diff --git a/website/source/docs/providers/influxdb/r/continuous_query.html.md b/website/source/docs/providers/influxdb/r/continuous_query.html.md index 1f62af799..7e8b596fc 100644 --- a/website/source/docs/providers/influxdb/r/continuous_query.html.md +++ b/website/source/docs/providers/influxdb/r/continuous_query.html.md @@ -12,7 +12,7 @@ The continuous_query resource allows a continuous query to be created on an Infl ## Example Usage -``` +```hcl resource "influxdb_database" "test" { name = "terraform-test" } diff --git a/website/source/docs/providers/influxdb/r/database.html.md b/website/source/docs/providers/influxdb/r/database.html.md index 0508e5fb3..0e0cf4933 100644 --- a/website/source/docs/providers/influxdb/r/database.html.md +++ b/website/source/docs/providers/influxdb/r/database.html.md @@ -12,7 +12,7 @@ The database resource allows a database to be created on an InfluxDB server. ## Example Usage -``` +```hcl resource "influxdb_database" "metrics" { name = "awesome_app" } diff --git a/website/source/docs/providers/influxdb/r/user.html.md b/website/source/docs/providers/influxdb/r/user.html.md index 6ff0397e4..a1911b5e9 100644 --- a/website/source/docs/providers/influxdb/r/user.html.md +++ b/website/source/docs/providers/influxdb/r/user.html.md @@ -12,7 +12,7 @@ The user resource allows a user to be created on an InfluxDB server. ## Example Usage -``` +```hcl resource "influxdb_database" "green" { name = "terraform-green" } diff --git a/website/source/docs/providers/librato/index.html.markdown b/website/source/docs/providers/librato/index.html.markdown index 58413cca2..b80d83081 100644 --- a/website/source/docs/providers/librato/index.html.markdown +++ b/website/source/docs/providers/librato/index.html.markdown @@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources. 
## Example Usage -``` +```hcl # Configure the Librato provider provider "librato" { email = "ops@company.com" diff --git a/website/source/docs/providers/librato/r/alert.html.markdown b/website/source/docs/providers/librato/r/alert.html.markdown index 67a904dfd..85dc6a7bf 100644 --- a/website/source/docs/providers/librato/r/alert.html.markdown +++ b/website/source/docs/providers/librato/r/alert.html.markdown @@ -13,7 +13,7 @@ create and manage alerts on Librato. ## Example Usage -``` +```hcl # Create a new Librato alert resource "librato_alert" "myalert" { name = "MyAlert" diff --git a/website/source/docs/providers/librato/r/service.html.markdown b/website/source/docs/providers/librato/r/service.html.markdown index f1b0221d9..1e579bf77 100644 --- a/website/source/docs/providers/librato/r/service.html.markdown +++ b/website/source/docs/providers/librato/r/service.html.markdown @@ -13,7 +13,7 @@ create and manage notification services on Librato. ## Example Usage -``` +```hcl # Create a new Librato service resource "librato_service" "email" { title = "Email the admins" diff --git a/website/source/docs/providers/librato/r/space.html.markdown b/website/source/docs/providers/librato/r/space.html.markdown index 2ddb3401e..1d6670d7a 100644 --- a/website/source/docs/providers/librato/r/space.html.markdown +++ b/website/source/docs/providers/librato/r/space.html.markdown @@ -13,7 +13,7 @@ create and manage spaces on Librato. ## Example Usage -``` +```hcl # Create a new Librato space resource "librato_space" "default" { name = "My New Space" diff --git a/website/source/docs/providers/librato/r/space_chart.html.markdown b/website/source/docs/providers/librato/r/space_chart.html.markdown index 5e5db1e8a..90ad75898 100644 --- a/website/source/docs/providers/librato/r/space_chart.html.markdown +++ b/website/source/docs/providers/librato/r/space_chart.html.markdown @@ -13,7 +13,7 @@ create and manage charts in Librato Spaces. ## Example Usage -``` +```hcl # Create a new Librato space resource "librato_space" "my_space" { name = "My New Space" diff --git a/website/source/docs/providers/logentries/index.html.markdown b/website/source/docs/providers/logentries/index.html.markdown index 77eecc70c..31d9f57f6 100644 --- a/website/source/docs/providers/logentries/index.html.markdown +++ b/website/source/docs/providers/logentries/index.html.markdown @@ -14,7 +14,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the Logentries provider provider "logentries" { account_key = "${var.logentries_account_key}" diff --git a/website/source/docs/providers/logentries/r/log.html.markdown b/website/source/docs/providers/logentries/r/log.html.markdown index 44b0da278..dbdbd1d6c 100644 --- a/website/source/docs/providers/logentries/r/log.html.markdown +++ b/website/source/docs/providers/logentries/r/log.html.markdown @@ -12,7 +12,7 @@ Provides a Logentries log resource. ## Example Usage -``` +```hcl # Create a log and add it to the log set resource "logentries_log" "app_log" { logset_id = "${logentries_logset.host_logs.id}" diff --git a/website/source/docs/providers/logentries/r/logset.html.markdown b/website/source/docs/providers/logentries/r/logset.html.markdown index 1c7f5a119..119347ee7 100644 --- a/website/source/docs/providers/logentries/r/logset.html.markdown +++ b/website/source/docs/providers/logentries/r/logset.html.markdown @@ -12,7 +12,7 @@ Provides a Logentries logset resource. 
A logset is a collection of `logentries_l ## Example Usage -``` +```hcl # Create a log set resource "logentries_logset" "host_logs" { name = "${var.server}-logs" diff --git a/website/source/docs/providers/mailgun/index.html.markdown b/website/source/docs/providers/mailgun/index.html.markdown index b46b5581c..e4057287c 100644 --- a/website/source/docs/providers/mailgun/index.html.markdown +++ b/website/source/docs/providers/mailgun/index.html.markdown @@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the Mailgun provider provider "mailgun" { api_key = "${var.mailgun_api_key}" diff --git a/website/source/docs/providers/mailgun/r/domain.html.markdown b/website/source/docs/providers/mailgun/r/domain.html.markdown index f417d7e27..791fa6052 100644 --- a/website/source/docs/providers/mailgun/r/domain.html.markdown +++ b/website/source/docs/providers/mailgun/r/domain.html.markdown @@ -13,7 +13,7 @@ create and manage applications on Mailgun. ## Example Usage -``` +```hcl # Create a new Mailgun domain resource "mailgun_domain" "default" { name = "test.example.com" diff --git a/website/source/docs/providers/newrelic/d/application.html.markdown b/website/source/docs/providers/newrelic/d/application.html.markdown index a0f383952..8d0f4a209 100644 --- a/website/source/docs/providers/newrelic/d/application.html.markdown +++ b/website/source/docs/providers/newrelic/d/application.html.markdown @@ -12,7 +12,7 @@ Use this data source to get information about a specific application in New Reli ## Example Usage -``` +```hcl data "newrelic_application" "app" { name = "my-app" } diff --git a/website/source/docs/providers/newrelic/index.html.markdown b/website/source/docs/providers/newrelic/index.html.markdown index faaa7fc37..ad1e4a3fc 100644 --- a/website/source/docs/providers/newrelic/index.html.markdown +++ b/website/source/docs/providers/newrelic/index.html.markdown @@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources. 
## Example Usage -``` +```hcl # Configure the New Relic provider provider "newrelic" { api_key = "${var.newrelic_api_key}" diff --git a/website/source/docs/providers/newrelic/r/alert_channel.html.markdown b/website/source/docs/providers/newrelic/r/alert_channel.html.markdown index 906700250..eb533a778 100644 --- a/website/source/docs/providers/newrelic/r/alert_channel.html.markdown +++ b/website/source/docs/providers/newrelic/r/alert_channel.html.markdown @@ -10,7 +10,7 @@ description: |- ## Example Usage -``` +```hcl resource "newrelic_alert_channel" "foo" { name = "foo" type = "email" diff --git a/website/source/docs/providers/newrelic/r/alert_condition.html.markdown b/website/source/docs/providers/newrelic/r/alert_condition.html.markdown index 6b41f92e9..bbbcb2c27 100644 --- a/website/source/docs/providers/newrelic/r/alert_condition.html.markdown +++ b/website/source/docs/providers/newrelic/r/alert_condition.html.markdown @@ -10,7 +10,7 @@ description: |- ## Example Usage -``` +```hcl data "newrelic_application" "app" { name = "my-app" } diff --git a/website/source/docs/providers/newrelic/r/alert_policy.html.markdown b/website/source/docs/providers/newrelic/r/alert_policy.html.markdown index abb77c6f9..fddb20461 100644 --- a/website/source/docs/providers/newrelic/r/alert_policy.html.markdown +++ b/website/source/docs/providers/newrelic/r/alert_policy.html.markdown @@ -10,7 +10,7 @@ description: |- ## Example Usage -``` +```hcl resource "newrelic_alert_policy" "foo" { name = "foo" } diff --git a/website/source/docs/providers/newrelic/r/alert_policy_channel.html.markdown b/website/source/docs/providers/newrelic/r/alert_policy_channel.html.markdown index fae8a4327..310d14876 100644 --- a/website/source/docs/providers/newrelic/r/alert_policy_channel.html.markdown +++ b/website/source/docs/providers/newrelic/r/alert_policy_channel.html.markdown @@ -10,7 +10,7 @@ description: |- ## Example Usage -``` +```hcl resource "newrelic_alert_policy" "foo" { name = "foo" } diff --git a/website/source/docs/providers/ns1/index.html.markdown b/website/source/docs/providers/ns1/index.html.markdown index 7260065a8..c4d4ccf4a 100644 --- a/website/source/docs/providers/ns1/index.html.markdown +++ b/website/source/docs/providers/ns1/index.html.markdown @@ -15,7 +15,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the NS1 provider provider "ns1" { apikey = "${var.ns1_apikey}" diff --git a/website/source/docs/providers/ns1/r/apikey.html.markdown b/website/source/docs/providers/ns1/r/apikey.html.markdown index b44dc1ea0..5a065ada8 100644 --- a/website/source/docs/providers/ns1/r/apikey.html.markdown +++ b/website/source/docs/providers/ns1/r/apikey.html.markdown @@ -12,7 +12,7 @@ Provides a NS1 Api Key resource. This can be used to create, modify, and delete ## Example Usage -``` +```hcl resource "ns1_team" "example" { name = "Example team" } diff --git a/website/source/docs/providers/ns1/r/datafeed.html.markdown b/website/source/docs/providers/ns1/r/datafeed.html.markdown index 753c13401..a02dcbbb7 100644 --- a/website/source/docs/providers/ns1/r/datafeed.html.markdown +++ b/website/source/docs/providers/ns1/r/datafeed.html.markdown @@ -12,7 +12,7 @@ Provides a NS1 Data Feed resource. 
This can be used to create, modify, and delet ## Example Usage -``` +```hcl resource "ns1_datasource" "example" { name = "example" sourcetype = "nsone_v1" diff --git a/website/source/docs/providers/ns1/r/datasource.html.markdown b/website/source/docs/providers/ns1/r/datasource.html.markdown index 077299c48..d41c861d9 100644 --- a/website/source/docs/providers/ns1/r/datasource.html.markdown +++ b/website/source/docs/providers/ns1/r/datasource.html.markdown @@ -12,7 +12,7 @@ Provides a NS1 Data Source resource. This can be used to create, modify, and del ## Example Usage -``` +```hcl resource "ns1_datasource" "example" { name = "example" sourcetype = "nsone_v1" diff --git a/website/source/docs/providers/ns1/r/monitoringjob.html.markdown b/website/source/docs/providers/ns1/r/monitoringjob.html.markdown index 1def3e4a2..8220c1c6c 100644 --- a/website/source/docs/providers/ns1/r/monitoringjob.html.markdown +++ b/website/source/docs/providers/ns1/r/monitoringjob.html.markdown @@ -12,7 +12,7 @@ Provides a NS1 Monitoring Job resource. This can be used to create, modify, and ## Example Usage -``` +```hcl resource "ns1_monitoringjob" "uswest_monitor" { name = "uswest" active = true diff --git a/website/source/docs/providers/ns1/r/notifylist.html.markdown b/website/source/docs/providers/ns1/r/notifylist.html.markdown index 35f6f67e8..af4500bca 100644 --- a/website/source/docs/providers/ns1/r/notifylist.html.markdown +++ b/website/source/docs/providers/ns1/r/notifylist.html.markdown @@ -12,7 +12,7 @@ Provides a NS1 Notify List resource. This can be used to create, modify, and del ## Example Usage -``` +```hcl resource "ns1_notifylist" "nl" { name = "my notify list" notifications = { diff --git a/website/source/docs/providers/ns1/r/record.html.markdown b/website/source/docs/providers/ns1/r/record.html.markdown index 05b2bde50..fb03a78f5 100644 --- a/website/source/docs/providers/ns1/r/record.html.markdown +++ b/website/source/docs/providers/ns1/r/record.html.markdown @@ -12,7 +12,7 @@ Provides a NS1 Record resource. This can be used to create, modify, and delete r ## Example Usage -``` +```hcl resource "ns1_zone" "tld" { zone = "terraform.example" } diff --git a/website/source/docs/providers/ns1/r/team.html.markdown b/website/source/docs/providers/ns1/r/team.html.markdown index acd137aa7..76cba4e4b 100644 --- a/website/source/docs/providers/ns1/r/team.html.markdown +++ b/website/source/docs/providers/ns1/r/team.html.markdown @@ -12,7 +12,7 @@ Provides a NS1 Team resource. This can be used to create, modify, and delete tea ## Example Usage -``` +```hcl # Create a new NS1 Team resource "ns1_team" "example" { name = "Example team" diff --git a/website/source/docs/providers/ns1/r/user.html.markdown b/website/source/docs/providers/ns1/r/user.html.markdown index 0451f9ce4..50b0faa69 100644 --- a/website/source/docs/providers/ns1/r/user.html.markdown +++ b/website/source/docs/providers/ns1/r/user.html.markdown @@ -12,7 +12,7 @@ Provides a NS1 User resource. Creating a user sends an invitation email to the u ## Example Usage -``` +```hcl resource "ns1_team" "example" { name = "Example team" diff --git a/website/source/docs/providers/ns1/r/zone.html.markdown b/website/source/docs/providers/ns1/r/zone.html.markdown index 17e2450da..bd23ed4be 100644 --- a/website/source/docs/providers/ns1/r/zone.html.markdown +++ b/website/source/docs/providers/ns1/r/zone.html.markdown @@ -12,7 +12,7 @@ Provides a NS1 DNS Zone resource. 
This can be used to create, modify, and delete ## Example Usage -``` +```hcl # Create a new DNS zone resource "ns1_zone" "example" { zone = "terraform.example.io" diff --git a/website/source/docs/providers/openstack/d/images_image_v2.html.markdown b/website/source/docs/providers/openstack/d/images_image_v2.html.markdown index 70283acf4..5e9f724c3 100644 --- a/website/source/docs/providers/openstack/d/images_image_v2.html.markdown +++ b/website/source/docs/providers/openstack/d/images_image_v2.html.markdown @@ -12,7 +12,7 @@ Use this data source to get the ID of an available OpenStack image. ## Example Usage -``` +```hcl data "openstack_images_image_v2" "ubuntu" { name = "Ubuntu 16.04" most_recent = true diff --git a/website/source/docs/providers/openstack/d/networking_network_v2.html.markdown b/website/source/docs/providers/openstack/d/networking_network_v2.html.markdown index 571a698f1..c297769c2 100644 --- a/website/source/docs/providers/openstack/d/networking_network_v2.html.markdown +++ b/website/source/docs/providers/openstack/d/networking_network_v2.html.markdown @@ -12,7 +12,7 @@ Use this data source to get the ID of an available OpenStack network. ## Example Usage -``` +```hcl data "openstack_networking_network_v2" "network" { name = "tf_test_network" } diff --git a/website/source/docs/providers/openstack/index.html.markdown b/website/source/docs/providers/openstack/index.html.markdown index adbb7db72..378af04c1 100644 --- a/website/source/docs/providers/openstack/index.html.markdown +++ b/website/source/docs/providers/openstack/index.html.markdown @@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the OpenStack Provider provider "openstack" { user_name = "admin" diff --git a/website/source/docs/providers/openstack/r/blockstorage_volume_attach_v2.html.markdown b/website/source/docs/providers/openstack/r/blockstorage_volume_attach_v2.html.markdown index bf5a13f10..f7d078b09 100644 --- a/website/source/docs/providers/openstack/r/blockstorage_volume_attach_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/blockstorage_volume_attach_v2.html.markdown @@ -24,7 +24,7 @@ the `openstack_compute_volume_attach_v2` resource for that. ## Example Usage -``` +```hcl resource "openstack_blockstorage_volume_v2" "volume_1" { name = "volume_1" size = 1 diff --git a/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown b/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown index 93fc85dc4..c0282bf7b 100644 --- a/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown @@ -12,7 +12,7 @@ Manages a V1 volume resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_blockstorage_volume_v1" "volume_1" { region = "RegionOne" name = "tf-test-volume" diff --git a/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown b/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown index 4aaa9c575..951084d4d 100644 --- a/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 volume resource within OpenStack. 
## Example Usage -``` +```hcl resource "openstack_blockstorage_volume_v2" "volume_1" { region = "RegionOne" name = "volume_1" diff --git a/website/source/docs/providers/openstack/r/compute_floatingip_associate_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_floatingip_associate_v2.html.markdown index d729e36b5..698869f6e 100644 --- a/website/source/docs/providers/openstack/r/compute_floatingip_associate_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_floatingip_associate_v2.html.markdown @@ -15,7 +15,7 @@ Associate a floating IP to an instance. This can be used instead of the ### Automatically detect the correct network -``` +```hcl resource "openstack_compute_instance_v2" "instance_1" { name = "instance_1" image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743" @@ -36,7 +36,7 @@ resource "openstack_compute_floatingip_associate_v2" "fip_1" { ### Explicitly set the network to attach to -``` +```hcl resource "openstack_compute_instance_v2" "instance_1" { name = "instance_1" image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743" diff --git a/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown index cbaee21ce..c00945e13 100644 --- a/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown @@ -15,7 +15,7 @@ but only networking floating IPs can be used with load balancers. ## Example Usage -``` +```hcl resource "openstack_compute_floatingip_v2" "floatip_1" { pool = "public" } diff --git a/website/source/docs/providers/openstack/r/compute_instance_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_instance_v2.html.markdown index 7466b4301..87492d433 100644 --- a/website/source/docs/providers/openstack/r/compute_instance_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_instance_v2.html.markdown @@ -14,7 +14,7 @@ Manages a V2 VM instance resource within OpenStack. 
### Basic Instance -``` +```hcl resource "openstack_compute_instance_v2" "basic" { name = "basic" image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743" @@ -34,7 +34,7 @@ resource "openstack_compute_instance_v2" "basic" { ### Instance With Attached Volume -``` +```hcl resource "openstack_blockstorage_volume_v2" "myvol" { name = "myvol" size = 1 @@ -60,7 +60,7 @@ resource "openstack_compute_volume_attach_v2" "attached" { ### Boot From Volume -``` +```hcl resource "openstack_compute_instance_v2" "boot-from-volume" { name = "boot-from-volume" flavor_id = "3" @@ -84,7 +84,7 @@ resource "openstack_compute_instance_v2" "boot-from-volume" { ### Boot From an Existing Volume -``` +```hcl resource "openstack_blockstorage_volume_v1" "myvol" { name = "myvol" size = 5 @@ -113,7 +113,7 @@ resource "openstack_compute_instance_v2" "boot-from-volume" { ### Boot Instance, Create Volume, and Attach Volume as a Block Device -``` +```hcl resource "openstack_compute_instance_v2" "instance_1" { name = "instance_1" image_id = "" @@ -141,7 +141,7 @@ resource "openstack_compute_instance_v2" "instance_1" { ### Boot Instance and Attach Existing Volume as a Block Device -``` +```hcl resource "openstack_blockstorage_volume_v2" "volume_1" { name = "volume_1" size = 1 @@ -174,7 +174,7 @@ resource "openstack_compute_instance_v2" "instance_1" { ### Instance With Multiple Networks -``` +```hcl resource "openstack_networking_floatingip_v2" "myip" { pool = "my_pool" } @@ -204,7 +204,7 @@ resource "openstack_compute_floatingip_associate_v2" "myip" { ### Instance With Personality -``` +```hcl resource "openstack_compute_instance_v2" "personality" { name = "personality" image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743" @@ -225,7 +225,7 @@ resource "openstack_compute_instance_v2" "personality" { ### Instance with Multiple Ephemeral Disks -``` +```hcl resource "openstack_compute_instance_v2" "multi-eph" { name = "multi_eph" image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743" @@ -532,7 +532,7 @@ single IP address the user would want returned to the Instance's state information. Therefore, in order for a Provisioner to connect to an Instance via it's network Port, customize the `connection` information: -``` +```hcl resource "openstack_networking_port_v2" "port_1" { name = "port_1" admin_state_up = "true" diff --git a/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown index 16bade7a7..9f788060b 100644 --- a/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 keypair resource within OpenStack. 
## Example Usage -``` +```hcl resource "openstack_compute_keypair_v2" "test-keypair" { name = "my-keypair" public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAjpC1hwiOCCmKEWxJ4qzTTsJbKzndLotBCz5PcwtUnflmU+gHJtWMZKpuEGVi29h0A/+ydKek1O18k10Ff+4tyFjiHDQAnOfgWf7+b1yK+qDip3X1C0UPMbwHlTfSGWLGZqd9LvEFx9k3h/M+VtMvwR1lJ9LUyTAImnNjWG7TaIPmui30HvM2UiFEmqkr4ijq45MyX2+fLIePLRIF61p4whjHAQYufqyno3BS48icQb4p6iVEZPo4AE2o9oIyQvj2mx4dk5Y8CgSETOZTYDOR3rU2fZTRDRgPJDH9FWvQjF5tA0p3d9CoWWd2s6GKKbfoUIi8R/Db1BSPJwkqB" diff --git a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown index d542c6a4f..7385524bb 100644 --- a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 security group resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_compute_secgroup_v2" "secgroup_1" { name = "my_secgroup" description = "my security group" @@ -92,7 +92,7 @@ The following attributes are exported: When using ICMP as the `ip_protocol`, the `from_port` sets the ICMP _type_ and the `to_port` sets the ICMP _code_. To allow all ICMP types, set each value to `-1`, like so: -``` +```hcl rule { from_port = -1 to_port = -1 @@ -107,7 +107,7 @@ A list of ICMP types and codes can be found [here](https://en.wikipedia.org/wiki When referencing a security group in a configuration (for example, a configuration creates a new security group and then needs to apply it to an instance being created in the same configuration), it is currently recommended to reference the security group by name and not by ID, like this: -``` +```hcl resource "openstack_compute_instance_v2" "test-server" { name = "tf-test" image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743" diff --git a/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown index 340d5ed48..75f8f426d 100644 --- a/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 Server Group resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_compute_servergroup_v2" "test-sg" { name = "my-sg" policies = ["anti-affinity"] diff --git a/website/source/docs/providers/openstack/r/compute_volume_attach_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_volume_attach_v2.html.markdown index 694eb6c3c..1b9b2fdf1 100644 --- a/website/source/docs/providers/openstack/r/compute_volume_attach_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_volume_attach_v2.html.markdown @@ -13,7 +13,7 @@ Compute (Nova) v2 API. ## Example Usage -``` +```hcl resource "openstack_blockstorage_volume_v2" "volume_1" { name = "volume_1" size = 1 diff --git a/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown index 90c626b57..9df57dc08 100644 --- a/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown @@ -12,7 +12,7 @@ Manages a v1 firewall resource within OpenStack. 
## Example Usage -``` +```hcl resource "openstack_fw_rule_v1" "rule_1" { name = "my-rule-1" description = "drop TELNET traffic" diff --git a/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown index 6443c9496..e2eabf3a8 100644 --- a/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown @@ -12,7 +12,7 @@ Manages a v1 firewall policy resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_fw_rule_v1" "rule_1" { name = "my-rule-1" description = "drop TELNET traffic" diff --git a/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown index dbc4d15ba..69ea31a00 100644 --- a/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown @@ -12,7 +12,7 @@ Manages a v1 firewall rule resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_fw_rule_v1" "rule_1" { name = "my_rule" description = "drop TELNET traffic" diff --git a/website/source/docs/providers/openstack/r/images_image_v2.html.markdown b/website/source/docs/providers/openstack/r/images_image_v2.html.markdown index 252e40374..91ec99463 100644 --- a/website/source/docs/providers/openstack/r/images_image_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/images_image_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 Image resource within OpenStack Glance. ## Example Usage -``` +```hcl resource "openstack_images_image_v2" "rancheros" { name = "RancherOS" image_source_url = "https://releases.rancher.com/os/latest/rancheros-openstack.img" diff --git a/website/source/docs/providers/openstack/r/lb_listener_v2.html.markdown b/website/source/docs/providers/openstack/r/lb_listener_v2.html.markdown index 7062bb348..579d2f744 100644 --- a/website/source/docs/providers/openstack/r/lb_listener_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_listener_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 listener resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_lb_listener_v2" "listener_1" { protocol = "HTTP" protocol_port = 8080 diff --git a/website/source/docs/providers/openstack/r/lb_loadbalancer_v2.html.markdown b/website/source/docs/providers/openstack/r/lb_loadbalancer_v2.html.markdown index 7da4f3178..e8ee33e9c 100644 --- a/website/source/docs/providers/openstack/r/lb_loadbalancer_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_loadbalancer_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 loadbalancer resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_lb_loadbalancer_v2" "lb_1" { vip_subnet_id = "d9415786-5f1a-428b-b35f-2f1523e146d2" } diff --git a/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown index 00032f6da..a6bf9d544 100644 --- a/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown @@ -12,7 +12,7 @@ Manages a V1 load balancer member resource within OpenStack. 
## Example Usage -``` +```hcl resource "openstack_lb_member_v1" "member_1" { pool_id = "d9415786-5f1a-428b-b35f-2f1523e146d2" address = "192.168.0.10" diff --git a/website/source/docs/providers/openstack/r/lb_member_v2.html.markdown b/website/source/docs/providers/openstack/r/lb_member_v2.html.markdown index e2ff0c637..ea5e7ecfb 100644 --- a/website/source/docs/providers/openstack/r/lb_member_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_member_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 member resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_lb_member_v2" "member_1" { address = "192.168.199.23" protocol_port = 8080 diff --git a/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown index a599da803..a03f748ed 100644 --- a/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown @@ -12,7 +12,7 @@ Manages a V1 load balancer monitor resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_lb_monitor_v1" "monitor_1" { type = "PING" delay = 30 diff --git a/website/source/docs/providers/openstack/r/lb_monitor_v2.html.markdown b/website/source/docs/providers/openstack/r/lb_monitor_v2.html.markdown index a60ed9a8f..86c65aa5c 100644 --- a/website/source/docs/providers/openstack/r/lb_monitor_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_monitor_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 monitor resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_lb_monitor_v2" "monitor_1" { type = "PING" delay = 20 diff --git a/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown index dae4471ba..54afa16d3 100644 --- a/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown @@ -12,7 +12,7 @@ Manages a V1 load balancer pool resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_lb_pool_v1" "pool_1" { name = "tf_test_lb_pool" protocol = "HTTP" diff --git a/website/source/docs/providers/openstack/r/lb_pool_v2.html.markdown b/website/source/docs/providers/openstack/r/lb_pool_v2.html.markdown index aff0550ad..1613ad72a 100644 --- a/website/source/docs/providers/openstack/r/lb_pool_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_pool_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 pool resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_lb_pool_v2" "pool_1" { protocol = "ProtocolHTTP" lb_method = "ROUND_ROBIN" diff --git a/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown index c08e45158..403354a99 100644 --- a/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown @@ -12,7 +12,7 @@ Manages a V1 load balancer vip resource within OpenStack. 
## Example Usage -``` +```hcl resource "openstack_lb_vip_v1" "vip_1" { name = "tf_test_lb_vip" subnet_id = "12345" diff --git a/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown index f9dfb5cd3..a59f9d341 100644 --- a/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown @@ -15,7 +15,7 @@ but only compute floating IPs can be used with compute instances. ## Example Usage -``` +```hcl resource "openstack_networking_floatingip_v2" "floatip_1" { pool = "public" } diff --git a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown index b16c08eaa..5eb4ca0fd 100644 --- a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 Neutron network resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_networking_network_v2" "network_1" { name = "network_1" admin_state_up = "true" diff --git a/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown index 9cb62ac40..5cbef5478 100644 --- a/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 port resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_networking_network_v2" "network_1" { name = "network_1" admin_state_up = "true" diff --git a/website/source/docs/providers/openstack/r/networking_router_interface_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_router_interface_v2.html.markdown index a245f5c13..610c04ed1 100644 --- a/website/source/docs/providers/openstack/r/networking_router_interface_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_router_interface_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 router interface resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_networking_network_v2" "network_1" { name = "tf_test_network" admin_state_up = "true" diff --git a/website/source/docs/providers/openstack/r/networking_router_route_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_router_route_v2.html.markdown index 9a380fda4..fac11c0f1 100644 --- a/website/source/docs/providers/openstack/r/networking_router_route_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_router_route_v2.html.markdown @@ -12,7 +12,7 @@ Creates a routing entry on an OpenStack V2 router. ## Example Usage -``` +```hcl resource "openstack_networking_router_v2" "router_1" { name = "router_1" admin_state_up = "true" diff --git a/website/source/docs/providers/openstack/r/networking_router_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_router_v2.html.markdown index 4518226bf..be760746f 100644 --- a/website/source/docs/providers/openstack/r/networking_router_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_router_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 router resource within OpenStack.
## Example Usage -``` +```hcl resource "openstack_networking_router_v2" "router_1" { name = "my_router" external_gateway = "f67f0d72-0ddf-11e4-9d95-e1f29f417e2f" diff --git a/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown index 2770f5783..67db99516 100644 --- a/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown @@ -14,7 +14,7 @@ and also allows an admin to target a specific tenant_id. ## Example Usage -``` +```hcl resource "openstack_networking_secgroup_v2" "secgroup_1" { name = "secgroup_1" description = "My neutron security group" diff --git a/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown index 6d3d8d330..e3f2a30e5 100644 --- a/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown @@ -14,7 +14,7 @@ and also allows an admin to target a specific tenant_id. ## Example Usage -``` +```hcl resource "openstack_networking_secgroup_v2" "secgroup_1" { name = "secgroup_1" description = "My neutron security group" @@ -61,7 +61,7 @@ Terraform, so if you prefer to have *all* aspects of your infrastructure managed by Terraform, set `delete_default_rules` to `true` and then create separate security group rules such as the following: -``` +```hcl resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_v4" { direction = "egress" ethertype = "IPv4" diff --git a/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown index b604484d4..0d094591c 100644 --- a/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown @@ -12,7 +12,7 @@ Manages a V2 Neutron subnet resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_networking_network_v2" "network_1" { name = "tf_test_network" admin_state_up = "true" diff --git a/website/source/docs/providers/openstack/r/objectstorage_container_v1.html.markdown b/website/source/docs/providers/openstack/r/objectstorage_container_v1.html.markdown index 807927240..21ca25c6a 100644 --- a/website/source/docs/providers/openstack/r/objectstorage_container_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/objectstorage_container_v1.html.markdown @@ -12,7 +12,7 @@ Manages a V1 container resource within OpenStack. ## Example Usage -``` +```hcl resource "openstack_objectstorage_container_v1" "container_1" { region = "RegionOne" name = "tf-test-container-1" diff --git a/website/source/docs/providers/opsgenie/d/user.html.markdown b/website/source/docs/providers/opsgenie/d/user.html.markdown index c0424197b..253a10df3 100644 --- a/website/source/docs/providers/opsgenie/d/user.html.markdown +++ b/website/source/docs/providers/opsgenie/d/user.html.markdown @@ -12,7 +12,7 @@ Use this data source to get information about a specific user within OpsGenie. 
## Example Usage -``` +```hcl data "opsgenie_user" "cookie_monster" { username = "me@cookie-monster.com" } diff --git a/website/source/docs/providers/opsgenie/index.html.markdown b/website/source/docs/providers/opsgenie/index.html.markdown index 849779435..526764fc0 100644 --- a/website/source/docs/providers/opsgenie/index.html.markdown +++ b/website/source/docs/providers/opsgenie/index.html.markdown @@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the OpsGenie Provider provider "opsgenie" { api_key = "key" diff --git a/website/source/docs/providers/opsgenie/r/team.html.markdown b/website/source/docs/providers/opsgenie/r/team.html.markdown index 45cfdf988..3991ee680 100644 --- a/website/source/docs/providers/opsgenie/r/team.html.markdown +++ b/website/source/docs/providers/opsgenie/r/team.html.markdown @@ -12,7 +12,7 @@ Manages a Team within OpsGenie. ## Example Usage -``` +```hcl resource "opsgenie_user" "first" { username = "user@domain.com" full_name = "Cookie Monster" diff --git a/website/source/docs/providers/opsgenie/r/user.html.markdown b/website/source/docs/providers/opsgenie/r/user.html.markdown index 0194fda33..88cb904bb 100644 --- a/website/source/docs/providers/opsgenie/r/user.html.markdown +++ b/website/source/docs/providers/opsgenie/r/user.html.markdown @@ -12,7 +12,7 @@ Manages a User within OpsGenie. ## Example Usage -``` +```hcl resource "opsgenie_user" "test" { username = "user@domain.com" full_name = "Cookie Monster" diff --git a/website/source/docs/providers/packet/index.html.markdown b/website/source/docs/providers/packet/index.html.markdown index 6663127be..c29833542 100644 --- a/website/source/docs/providers/packet/index.html.markdown +++ b/website/source/docs/providers/packet/index.html.markdown @@ -15,7 +15,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the Packet Provider provider "packet" { auth_token = "${var.auth_token}" diff --git a/website/source/docs/providers/packet/r/device.html.markdown b/website/source/docs/providers/packet/r/device.html.markdown index 91dfbdd90..5b8cb1615 100644 --- a/website/source/docs/providers/packet/r/device.html.markdown +++ b/website/source/docs/providers/packet/r/device.html.markdown @@ -13,7 +13,7 @@ modify, and delete devices. ## Example Usage -``` +```hcl # Create a device and add it to cool_project resource "packet_device" "web1" { hostname = "tf.coreos2" diff --git a/website/source/docs/providers/packet/r/project.html.markdown b/website/source/docs/providers/packet/r/project.html.markdown index 0aad6557f..f7ee09db3 100644 --- a/website/source/docs/providers/packet/r/project.html.markdown +++ b/website/source/docs/providers/packet/r/project.html.markdown @@ -13,7 +13,7 @@ in your projects. ## Example Usage -``` +```hcl # Create a new Project resource "packet_project" "tf_project_1" { name = "Terraform Fun" diff --git a/website/source/docs/providers/packet/r/ssh_key.html.markdown b/website/source/docs/providers/packet/r/ssh_key.html.markdown index 6fbe6f70a..7eaaa042a 100644 --- a/website/source/docs/providers/packet/r/ssh_key.html.markdown +++ b/website/source/docs/providers/packet/r/ssh_key.html.markdown @@ -15,7 +15,7 @@ device creation.
## Example Usage -``` +```hcl # Create a new SSH key resource "packet_ssh_key" "key1" { name = "terraform-1" diff --git a/website/source/docs/providers/packet/r/volume.html.markdown b/website/source/docs/providers/packet/r/volume.html.markdown index 5b654bc91..3ea4a292f 100644 --- a/website/source/docs/providers/packet/r/volume.html.markdown +++ b/website/source/docs/providers/packet/r/volume.html.markdown @@ -16,7 +16,7 @@ scripts. ## Example Usage -``` +```hcl # Create a new block volume resource "packet_volume" "volume1" { description = "terraform-volume-1" diff --git a/website/source/docs/providers/pagerduty/d/escalation_policy.html.markdown b/website/source/docs/providers/pagerduty/d/escalation_policy.html.markdown index 7b0362996..916ed72c8 100644 --- a/website/source/docs/providers/pagerduty/d/escalation_policy.html.markdown +++ b/website/source/docs/providers/pagerduty/d/escalation_policy.html.markdown @@ -14,7 +14,7 @@ Use this data source to get information about a specific [escalation policy][1] ## Example Usage -``` +```hcl data "pagerduty_escalation_policy" "test" { name = "Engineering Escalation Policy" } diff --git a/website/source/docs/providers/pagerduty/d/schedule.html.markdown b/website/source/docs/providers/pagerduty/d/schedule.html.markdown index 0127f2b46..842e0f7d4 100644 --- a/website/source/docs/providers/pagerduty/d/schedule.html.markdown +++ b/website/source/docs/providers/pagerduty/d/schedule.html.markdown @@ -14,7 +14,7 @@ Use this data source to get information about a specific [schedule][1] that you ## Example Usage -``` +```hcl data "pagerduty_schedule" "test" { name = "Daily Engineering Rotation" } diff --git a/website/source/docs/providers/pagerduty/d/user.html.markdown b/website/source/docs/providers/pagerduty/d/user.html.markdown index 216851aa0..3af946c02 100644 --- a/website/source/docs/providers/pagerduty/d/user.html.markdown +++ b/website/source/docs/providers/pagerduty/d/user.html.markdown @@ -12,7 +12,7 @@ Use this data source to get information about a specific [user][1] that you can ## Example Usage -``` +```hcl data "pagerduty_user" "me" { email = "me@example.com" } diff --git a/website/source/docs/providers/pagerduty/d/vendor.html.markdown b/website/source/docs/providers/pagerduty/d/vendor.html.markdown index 5e59d33fe..dc2300cbb 100644 --- a/website/source/docs/providers/pagerduty/d/vendor.html.markdown +++ b/website/source/docs/providers/pagerduty/d/vendor.html.markdown @@ -12,7 +12,7 @@ Use this data source to get information about a specific [vendor][1] that you ca ## Example Usage -``` +```hcl data "pagerduty_vendor" "datadog" { name = "Datadog" } diff --git a/website/source/docs/providers/pagerduty/index.html.markdown b/website/source/docs/providers/pagerduty/index.html.markdown index 5b34c7c2f..ff3a1bd2f 100644 --- a/website/source/docs/providers/pagerduty/index.html.markdown +++ b/website/source/docs/providers/pagerduty/index.html.markdown @@ -14,7 +14,7 @@ Use the navigation to the left to read about the available resources. 
## Example Usage -``` +```hcl # Configure the PagerDuty provider provider "pagerduty" { token = "${var.pagerduty_token}" diff --git a/website/source/docs/providers/pagerduty/r/addon.html.markdown b/website/source/docs/providers/pagerduty/r/addon.html.markdown index 6bbd17ca1..21f4f0398 100644 --- a/website/source/docs/providers/pagerduty/r/addon.html.markdown +++ b/website/source/docs/providers/pagerduty/r/addon.html.markdown @@ -12,7 +12,7 @@ With [add-ons](https://v2.developer.pagerduty.com/v2/page/api-reference#!/Add-on ## Example Usage -``` +```hcl resource "pagerduty_addon" "example" { name = "Internal Status Page" src = "https://intranet.example.com/status" diff --git a/website/source/docs/providers/pagerduty/r/escalation_policy.html.markdown b/website/source/docs/providers/pagerduty/r/escalation_policy.html.markdown index 00ad72633..7dcb04dff 100644 --- a/website/source/docs/providers/pagerduty/r/escalation_policy.html.markdown +++ b/website/source/docs/providers/pagerduty/r/escalation_policy.html.markdown @@ -13,7 +13,7 @@ An [escalation policy](https://v2.developer.pagerduty.com/v2/page/api-reference# ## Example Usage -``` +```hcl resource "pagerduty_team" "example" { name = "Engineering" description = "All engineering" diff --git a/website/source/docs/providers/pagerduty/r/schedule.html.markdown b/website/source/docs/providers/pagerduty/r/schedule.html.markdown index 6f1d0e7bd..9bb1f95be 100644 --- a/website/source/docs/providers/pagerduty/r/schedule.html.markdown +++ b/website/source/docs/providers/pagerduty/r/schedule.html.markdown @@ -13,7 +13,7 @@ A [schedule](https://v2.developer.pagerduty.com/v2/page/api-reference#!/Schedule ## Example Usage -``` +```hcl resource "pagerduty_user" "example" { name = "Earline Greenholt" email = "125.greenholt.earline@graham.name" diff --git a/website/source/docs/providers/pagerduty/r/service.html.markdown b/website/source/docs/providers/pagerduty/r/service.html.markdown index b5ba884c7..7f3fd9da0 100644 --- a/website/source/docs/providers/pagerduty/r/service.html.markdown +++ b/website/source/docs/providers/pagerduty/r/service.html.markdown @@ -13,7 +13,7 @@ A [service](https://v2.developer.pagerduty.com/v2/page/api-reference#!/Services/ ## Example Usage -``` +```hcl resource "pagerduty_user" "example" { name = "Earline Greenholt" email = "125.greenholt.earline@graham.name" @@ -79,7 +79,7 @@ When using `type = "use_support_hours"` in the `incident_urgency_rule` block you Below is an example for a `pagerduty_service` resource with `incident_urgency_rules` with `type = "use_support_hours"`, `support_hours` and a default `scheduled_action` as well. 
-``` +```hcl resource "pagerduty_service" "foo" { name = "bar" description = "bar bar bar" diff --git a/website/source/docs/providers/pagerduty/r/service_integration.html.markdown b/website/source/docs/providers/pagerduty/r/service_integration.html.markdown index abe98617d..ad4cd3046 100644 --- a/website/source/docs/providers/pagerduty/r/service_integration.html.markdown +++ b/website/source/docs/providers/pagerduty/r/service_integration.html.markdown @@ -12,7 +12,7 @@ A [service integration](https://v2.developer.pagerduty.com/v2/page/api-reference ## Example Usage -``` +```hcl resource "pagerduty_user" "example" { name = "Earline Greenholt" email = "125.greenholt.earline@graham.name" diff --git a/website/source/docs/providers/pagerduty/r/team.html.markdown b/website/source/docs/providers/pagerduty/r/team.html.markdown index d77621dd6..821d8f777 100644 --- a/website/source/docs/providers/pagerduty/r/team.html.markdown +++ b/website/source/docs/providers/pagerduty/r/team.html.markdown @@ -14,7 +14,7 @@ The account must have the `teams` ability to use the following resource. ## Example Usage -``` +```hcl resource "pagerduty_team" "example" { name = "Engineering" description = "All engineering" diff --git a/website/source/docs/providers/pagerduty/r/user.html.markdown b/website/source/docs/providers/pagerduty/r/user.html.markdown index 307beb377..d4cf11214 100644 --- a/website/source/docs/providers/pagerduty/r/user.html.markdown +++ b/website/source/docs/providers/pagerduty/r/user.html.markdown @@ -13,7 +13,7 @@ A [user](https://v2.developer.pagerduty.com/v2/page/api-reference#!/Users/get_us ## Example Usage -``` +```hcl resource "pagerduty_team" "example" { name = "Engineering" description = "All engineering" diff --git a/website/source/docs/providers/postgresql/index.html.markdown b/website/source/docs/providers/postgresql/index.html.markdown index 16277e396..b586ba0b5 100644 --- a/website/source/docs/providers/postgresql/index.html.markdown +++ b/website/source/docs/providers/postgresql/index.html.markdown @@ -14,7 +14,7 @@ Use the navigation to the left to read about the available resources. ## Usage -``` +```hcl provider "postgresql" { host = "postgres_server_ip" port = 5432 @@ -28,7 +28,7 @@ provider "postgresql" { Configuring multiple servers can be done by specifying the alias option. -``` +```hcl provider "postgresql" { alias = "pg1" host = "postgres_server_ip1" diff --git a/website/source/docs/providers/postgresql/r/postgresql_database.html.markdown b/website/source/docs/providers/postgresql/r/postgresql_database.html.markdown index 4ac8ecf9f..3ac0391c3 100644 --- a/website/source/docs/providers/postgresql/r/postgresql_database.html.markdown +++ b/website/source/docs/providers/postgresql/r/postgresql_database.html.markdown @@ -15,7 +15,7 @@ within a PostgreSQL server instance. ## Usage -``` +```hcl resource "postgresql_database" "my_db" { name = "my_db" owner = "my_role" @@ -87,7 +87,7 @@ resource "postgresql_database" "my_db" { `postgresql_database` supports importing resources. Supposing the following Terraform: -``` +```hcl provider "postgresql" { alias = "admindb" } diff --git a/website/source/docs/providers/postgresql/r/postgresql_extension.html.markdown b/website/source/docs/providers/postgresql/r/postgresql_extension.html.markdown index 946d000e1..64f7cafc0 100644 --- a/website/source/docs/providers/postgresql/r/postgresql_extension.html.markdown +++ b/website/source/docs/providers/postgresql/r/postgresql_extension.html.markdown @@ -14,7 +14,7 @@ server. 
## Usage -``` +```hcl resource "postgresql_extension" "my_extension" { name = "pg_trgm" } diff --git a/website/source/docs/providers/postgresql/r/postgresql_role.html.markdown b/website/source/docs/providers/postgresql/r/postgresql_role.html.markdown index c14f181f0..78f52e80e 100644 --- a/website/source/docs/providers/postgresql/r/postgresql_role.html.markdown +++ b/website/source/docs/providers/postgresql/r/postgresql_role.html.markdown @@ -26,7 +26,7 @@ and all but the final ``postgresql_role`` must specify a `skip_drop_role`. ## Usage -``` +```hcl resource "postgresql_role" "my_role" { name = "my_role" login = true @@ -116,7 +116,7 @@ resource "postgresql_role" "my_replication_role" { `postgresql_role` supports importing resources. Supposing the following Terraform: -``` +```hcl provider "postgresql" { alias = "admindb" } diff --git a/website/source/docs/providers/postgresql/r/postgresql_schema.html.markdown b/website/source/docs/providers/postgresql/r/postgresql_schema.html.markdown index e6dc78bce..43a148f23 100644 --- a/website/source/docs/providers/postgresql/r/postgresql_schema.html.markdown +++ b/website/source/docs/providers/postgresql/r/postgresql_schema.html.markdown @@ -15,7 +15,7 @@ a PostgreSQL database. ## Usage -``` +```hcl resource "postgresql_role" "app_www" { name = "app_www" } @@ -76,7 +76,7 @@ The `policy` block supports: `postgresql_schema` supports importing resources. Supposing the following Terraform: -``` +```hcl resource "postgresql_schema" "public" { name = "public" } diff --git a/website/source/docs/providers/powerdns/index.html.markdown b/website/source/docs/providers/powerdns/index.html.markdown index 40fe15103..f68a644bf 100644 --- a/website/source/docs/providers/powerdns/index.html.markdown +++ b/website/source/docs/providers/powerdns/index.html.markdown @@ -15,7 +15,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the PowerDNS provider provider "powerdns" { api_key = "${var.pdns_api_key}" diff --git a/website/source/docs/providers/powerdns/r/record.html.markdown b/website/source/docs/providers/powerdns/r/record.html.markdown index bc24c7e79..22cf3b433 100644 --- a/website/source/docs/providers/powerdns/r/record.html.markdown +++ b/website/source/docs/providers/powerdns/r/record.html.markdown @@ -16,7 +16,7 @@ Note that PowerDNS internally lowercases certain records (e.g. CNAME and AAAA), For the v1 API (PowerDNS version 4): -``` +```hcl # Add a record to the zone resource "powerdns_record" "foobar" { zone = "example.com." 
@@ -29,7 +29,7 @@ resource "powerdns_record" "foobar" { For the legacy API (PowerDNS version 3.4): -``` +```hcl # Add a record to the zone resource "powerdns_record" "foobar" { zone = "example.com" diff --git a/website/source/docs/providers/profitbricks/d/profitbricks_datacenter.html.markdown b/website/source/docs/providers/profitbricks/d/profitbricks_datacenter.html.markdown index 0fd279973..05375bb78 100644 --- a/website/source/docs/providers/profitbricks/d/profitbricks_datacenter.html.markdown +++ b/website/source/docs/providers/profitbricks/d/profitbricks_datacenter.html.markdown @@ -12,7 +12,7 @@ The data centers data source can be used to search for and return an existing Vi ## Example Usage -``` +```hcl data "profitbricks_datacenter" "dc_example" { name = "test_dc" location = "us" diff --git a/website/source/docs/providers/profitbricks/d/profitbricks_image.html.markdown b/website/source/docs/providers/profitbricks/d/profitbricks_image.html.markdown index 76c76470e..1f604b8c7 100644 --- a/website/source/docs/providers/profitbricks/d/profitbricks_image.html.markdown +++ b/website/source/docs/providers/profitbricks/d/profitbricks_image.html.markdown @@ -12,7 +12,7 @@ The images data source can be used to search for and return an existing image wh ## Example Usage -``` +```hcl data "profitbricks_image" "image_example" { name = "Ubuntu" type = "HDD" diff --git a/website/source/docs/providers/profitbricks/d/profitbricks_location.html.markdown b/website/source/docs/providers/profitbricks/d/profitbricks_location.html.markdown index a2301db6e..e50f18fc1 100644 --- a/website/source/docs/providers/profitbricks/d/profitbricks_location.html.markdown +++ b/website/source/docs/providers/profitbricks/d/profitbricks_location.html.markdown @@ -12,7 +12,7 @@ The locations data source can be used to search for and return an existing locat ## Example Usage -``` +```hcl data "profitbricks_location" "loc1" { name = "karlsruhe" feature = "SSD" diff --git a/website/source/docs/providers/profitbricks/index.html.markdown b/website/source/docs/providers/profitbricks/index.html.markdown index 5d8fec358..22bd5b54d 100644 --- a/website/source/docs/providers/profitbricks/index.html.markdown +++ b/website/source/docs/providers/profitbricks/index.html.markdown @@ -18,7 +18,7 @@ Use the navigation to the left to read about the available resources. The provider needs to be configured with proper credentials before it can be used. 
-``` +```shell $ export PROFITBRICKS_USERNAME="profitbricks_username" $ export PROFITBRICKS_PASSWORD="profitbricks_password" $ export PROFITBRICKS_API_URL="profitbricks_rest_url" @@ -32,7 +32,7 @@ The credentials provided in `.tf` file will override credentials in the environm ## Example Usage -``` +```hcl provider "profitbricks" { username = "profitbricks_username" password = "profitbricks_password" diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_datacenter.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_datacenter.html.markdown index 53d57c839..12b1612c1 100644 --- a/website/source/docs/providers/profitbricks/r/profitbricks_datacenter.html.markdown +++ b/website/source/docs/providers/profitbricks/r/profitbricks_datacenter.html.markdown @@ -12,7 +12,7 @@ Manages a Virtual Data Center on ProfitBricks ## Example Usage -``` +```hcl resource "profitbricks_datacenter" "example" { name = "datacenter name" location = "us/las" diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_firewall.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_firewall.html.markdown index e402d2f98..dfb0d0f86 100644 --- a/website/source/docs/providers/profitbricks/r/profitbricks_firewall.html.markdown +++ b/website/source/docs/providers/profitbricks/r/profitbricks_firewall.html.markdown @@ -12,7 +12,7 @@ Manages Firewall Rules on ProfitBricks ## Example Usage -``` +```hcl resource "profitbricks_firewall" "example" { datacenter_id = "${profitbricks_datacenter.example.id}" server_id = "${profitbricks_server.example.id}" diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_ipblock.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_ipblock.html.markdown index b65ca1cdf..1729eef8c 100644 --- a/website/source/docs/providers/profitbricks/r/profitbricks_ipblock.html.markdown +++ b/website/source/docs/providers/profitbricks/r/profitbricks_ipblock.html.markdown @@ -12,7 +12,7 @@ Manages IP Blocks on ProfitBricks ## Example Usage -``` +```hcl resource "profitbricks_ipblock" "example" { location = "${profitbricks_datacenter.example.location}" size = 1 diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_lan.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_lan.html.markdown index b7c419741..66dd03623 100644 --- a/website/source/docs/providers/profitbricks/r/profitbricks_lan.html.markdown +++ b/website/source/docs/providers/profitbricks/r/profitbricks_lan.html.markdown @@ -12,7 +12,7 @@ Manages LANs on ProfitBricks ## Example Usage -``` +```hcl resource "profitbricks_lan" "example" { datacenter_id = "${profitbricks_datacenter.example.id}" public = true diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_loadbalancer.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_loadbalancer.html.markdown index 3893a9086..11a1657bd 100644 --- a/website/source/docs/providers/profitbricks/r/profitbricks_loadbalancer.html.markdown +++ b/website/source/docs/providers/profitbricks/r/profitbricks_loadbalancer.html.markdown @@ -12,7 +12,7 @@ Manages Load Balancers on ProfitBricks ## Example Usage -``` +```hcl resource "profitbricks_loadbalancer" "example" { datacenter_id = "${profitbricks_datacenter.example.id}" nic_id = "${profitbricks_nic.example.id}" diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_nic.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_nic.html.markdown index
bb9348822..e0338f728 100644 --- a/website/source/docs/providers/profitbricks/r/profitbricks_nic.html.markdown +++ b/website/source/docs/providers/profitbricks/r/profitbricks_nic.html.markdown @@ -12,7 +12,7 @@ Manages NICs on ProfitBricks ## Example Usage -``` +```hcl resource "profitbricks_nic" "example" { datacenter_id = "${profitbricks_datacenter.example.id}" server_id = "${profitbricks_server.example.id}" diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_server.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_server.html.markdown index a5dda6455..68ef48d3f 100644 --- a/website/source/docs/providers/profitbricks/r/profitbricks_server.html.markdown +++ b/website/source/docs/providers/profitbricks/r/profitbricks_server.html.markdown @@ -14,7 +14,7 @@ Manages Servers on ProfitBricks This resource will create an operational server. After this section completes, the provisioner can be called. -``` +```hcl resource "profitbricks_server" "example" { name = "server" datacenter_id = "${profitbricks_datacenter.example.id}" diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_volume.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_volume.html.markdown index ccea2b5d3..fb4ee5f3c 100644 --- a/website/source/docs/providers/profitbricks/r/profitbricks_volume.html.markdown +++ b/website/source/docs/providers/profitbricks/r/profitbricks_volume.html.markdown @@ -14,7 +14,7 @@ Manages Volumes on ProfitBricks A primary volume will be created with the server. If there is a need for an additional volume, this resource handles it. -``` +```hcl resource "profitbricks_volume" "example" { datacenter_id = "${profitbricks_datacenter.example.id}" server_id = "${profitbricks_server.example.id}" diff --git a/website/source/docs/providers/rabbitmq/index.html.markdown b/website/source/docs/providers/rabbitmq/index.html.markdown index b420742ae..11f4d7881 100644 --- a/website/source/docs/providers/rabbitmq/index.html.markdown +++ b/website/source/docs/providers/rabbitmq/index.html.markdown @@ -18,7 +18,7 @@ Use the navigation to the left to read about the available resources. The following is a minimal example: -``` +```hcl # Configure the RabbitMQ provider provider "rabbitmq" { endpoint = "http://127.0.0.1" diff --git a/website/source/docs/providers/rabbitmq/r/binding.html.markdown b/website/source/docs/providers/rabbitmq/r/binding.html.markdown index ec83e786a..da797065e 100644 --- a/website/source/docs/providers/rabbitmq/r/binding.html.markdown +++ b/website/source/docs/providers/rabbitmq/r/binding.html.markdown @@ -13,7 +13,7 @@ between a queue and an exchange. ## Example Usage -``` +```hcl resource "rabbitmq_vhost" "test" { name = "test" } @@ -88,5 +88,5 @@ Bindings can be imported using the `id` which is composed of `vhost/source/destination/destination_type/properties_key`. E.g. ``` -terraform import rabbitmq_binding.test test/test/test/queue/%23 +$ terraform import rabbitmq_binding.test test/test/test/queue/%23 ``` diff --git a/website/source/docs/providers/rabbitmq/r/exchange.html.markdown b/website/source/docs/providers/rabbitmq/r/exchange.html.markdown index bd71db40c..bb5fc016c 100644 --- a/website/source/docs/providers/rabbitmq/r/exchange.html.markdown +++ b/website/source/docs/providers/rabbitmq/r/exchange.html.markdown @@ -12,7 +12,7 @@ The ``rabbitmq_exchange`` resource creates and manages an exchange.
## Example Usage -``` +```hcl resource "rabbitmq_vhost" "test" { name = "test" } diff --git a/website/source/docs/providers/rabbitmq/r/permissions.html.markdown b/website/source/docs/providers/rabbitmq/r/permissions.html.markdown index 1c25cdaf7..f30703547 100644 --- a/website/source/docs/providers/rabbitmq/r/permissions.html.markdown +++ b/website/source/docs/providers/rabbitmq/r/permissions.html.markdown @@ -13,7 +13,7 @@ permissions. ## Example Usage -``` +```hcl resource "rabbitmq_vhost" "test" { name = "test" } diff --git a/website/source/docs/providers/rabbitmq/r/policy.html.markdown b/website/source/docs/providers/rabbitmq/r/policy.html.markdown index 50744c814..951c8ac42 100644 --- a/website/source/docs/providers/rabbitmq/r/policy.html.markdown +++ b/website/source/docs/providers/rabbitmq/r/policy.html.markdown @@ -13,7 +13,7 @@ and queues. ## Example Usage -``` +```hcl resource "rabbitmq_vhost" "test" { name = "test" } diff --git a/website/source/docs/providers/rabbitmq/r/queue.html.markdown b/website/source/docs/providers/rabbitmq/r/queue.html.markdown index 1c3d89239..44abb50e3 100644 --- a/website/source/docs/providers/rabbitmq/r/queue.html.markdown +++ b/website/source/docs/providers/rabbitmq/r/queue.html.markdown @@ -12,7 +12,7 @@ The ``rabbitmq_queue`` resource creates and manages a queue. ## Example Usage -``` +```hcl resource "rabbitmq_vhost" "test" { name = "test" } diff --git a/website/source/docs/providers/rabbitmq/r/user.html.markdown b/website/source/docs/providers/rabbitmq/r/user.html.markdown index 2c3f9893d..e9e6e3183 100644 --- a/website/source/docs/providers/rabbitmq/r/user.html.markdown +++ b/website/source/docs/providers/rabbitmq/r/user.html.markdown @@ -15,7 +15,7 @@ The ``rabbitmq_user`` resource creates and manages a user. ## Example Usage -``` +```hcl resource "rabbitmq_user" "test" { name = "mctest" password = "foobar" diff --git a/website/source/docs/providers/rabbitmq/r/vhost.html.markdown b/website/source/docs/providers/rabbitmq/r/vhost.html.markdown index 4ba1b3e65..fc06d135e 100644 --- a/website/source/docs/providers/rabbitmq/r/vhost.html.markdown +++ b/website/source/docs/providers/rabbitmq/r/vhost.html.markdown @@ -12,7 +12,7 @@ The ``rabbitmq_vhost`` resource creates and manages a vhost. 
## Example Usage -``` +```hcl resource "rabbitmq_vhost" "my_vhost" { name = "my_vhost" } diff --git a/website/source/docs/providers/scaleway/d/bootscript.html.markdown b/website/source/docs/providers/scaleway/d/bootscript.html.markdown index 5b7664e2b..69c75ac50 100644 --- a/website/source/docs/providers/scaleway/d/bootscript.html.markdown +++ b/website/source/docs/providers/scaleway/d/bootscript.html.markdown @@ -13,7 +13,7 @@ Use this data source to get the ID of a registered Bootscript for use with the ## Example Usage -``` +```hcl data "scaleway_bootscript" "debug" { architecture = "arm" name_filter = "Rescue" diff --git a/website/source/docs/providers/scaleway/d/image.html.markdown b/website/source/docs/providers/scaleway/d/image.html.markdown index 983dfeb5f..e485d2867 100644 --- a/website/source/docs/providers/scaleway/d/image.html.markdown +++ b/website/source/docs/providers/scaleway/d/image.html.markdown @@ -13,7 +13,7 @@ Use this data source to get the ID of a registered Image for use with the ## Example Usage -``` +```hcl data "scaleway_image" "ubuntu" { architecture = "arm" name = "Ubuntu Precise" diff --git a/website/source/docs/providers/scaleway/r/ip.html.markdown b/website/source/docs/providers/scaleway/r/ip.html.markdown index cb87dbcba..33630e34a 100644 --- a/website/source/docs/providers/scaleway/r/ip.html.markdown +++ b/website/source/docs/providers/scaleway/r/ip.html.markdown @@ -13,7 +13,7 @@ For additional details please refer to [API documentation](https://developer.sca ## Example Usage -``` +```hcl resource "scaleway_ip" "test_ip" {} ``` diff --git a/website/source/docs/providers/scaleway/r/security_group.html.markdown b/website/source/docs/providers/scaleway/r/security_group.html.markdown index 572cacc42..91293e908 100644 --- a/website/source/docs/providers/scaleway/r/security_group.html.markdown +++ b/website/source/docs/providers/scaleway/r/security_group.html.markdown @@ -13,7 +13,7 @@ For additional details please refer to [API documentation](https://developer.sca ## Example Usage -``` +```hcl resource "scaleway_security_group" "test" { name = "test" description = "test" diff --git a/website/source/docs/providers/scaleway/r/security_group_rule.html.markdown b/website/source/docs/providers/scaleway/r/security_group_rule.html.markdown index 9bfc57e3d..ecf84e589 100644 --- a/website/source/docs/providers/scaleway/r/security_group_rule.html.markdown +++ b/website/source/docs/providers/scaleway/r/security_group_rule.html.markdown @@ -13,7 +13,7 @@ For additional details please refer to [API documentation](https://developer.sca ## Example Usage -``` +```hcl resource "scaleway_security_group" "test" { name = "test" description = "test" diff --git a/website/source/docs/providers/scaleway/r/server.html.markdown b/website/source/docs/providers/scaleway/r/server.html.markdown index 38cba81c0..9a9459764 100644 --- a/website/source/docs/providers/scaleway/r/server.html.markdown +++ b/website/source/docs/providers/scaleway/r/server.html.markdown @@ -13,7 +13,7 @@ For additional details please refer to [API documentation](https://developer.sca ## Example Usage -``` +```hcl resource "scaleway_server" "test" { name = "test" image = "5faef9cd-ea9b-4a63-9171-9e26bec03dbc" diff --git a/website/source/docs/providers/scaleway/r/volume.html.markdown b/website/source/docs/providers/scaleway/r/volume.html.markdown index e0bb7ae1b..4aa2be147 100644 --- a/website/source/docs/providers/scaleway/r/volume.html.markdown +++ b/website/source/docs/providers/scaleway/r/volume.html.markdown @@ 
-13,7 +13,7 @@ For additional details please refer to [API documentation](https://developer.sca ## Example Usage -``` +```hcl resource "scaleway_server" "test" { name = "test" image = "aecaed73-51a5-4439-a127-6d8229847145" diff --git a/website/source/docs/providers/scaleway/r/volume_attachment.html.markdown b/website/source/docs/providers/scaleway/r/volume_attachment.html.markdown index f6e7533c2..05e89fc81 100644 --- a/website/source/docs/providers/scaleway/r/volume_attachment.html.markdown +++ b/website/source/docs/providers/scaleway/r/volume_attachment.html.markdown @@ -15,7 +15,7 @@ to downtime if the server is already in use. ## Example Usage -``` +```hcl resource "scaleway_server" "test" { name = "test" image = "aecaed73-51a5-4439-a127-6d8229847145" diff --git a/website/source/docs/providers/softlayer/r/ssh_key.html.markdown b/website/source/docs/providers/softlayer/r/ssh_key.html.markdown index 479240aba..571ad6c6c 100644 --- a/website/source/docs/providers/softlayer/r/ssh_key.html.markdown +++ b/website/source/docs/providers/softlayer/r/ssh_key.html.markdown @@ -13,7 +13,7 @@ For additional details please refer to [API documentation](http://sldn.softlayer ## Example Usage -``` +```hcl resource "softlayer_ssh_key" "test_ssh_key" { name = "test_ssh_key_name" notes = "test_ssh_key_notes" diff --git a/website/source/docs/providers/spotinst/index.html.markdown b/website/source/docs/providers/spotinst/index.html.markdown index ea675a3bb..d0e39b049 100755 --- a/website/source/docs/providers/spotinst/index.html.markdown +++ b/website/source/docs/providers/spotinst/index.html.markdown @@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the Spotinst provider provider "spotinst" { email = "${var.spotinst_email}" diff --git a/website/source/docs/providers/spotinst/r/aws_group.html.markdown b/website/source/docs/providers/spotinst/r/aws_group.html.markdown index 8eeb2be9c..b7c786141 100755 --- a/website/source/docs/providers/spotinst/r/aws_group.html.markdown +++ b/website/source/docs/providers/spotinst/r/aws_group.html.markdown @@ -12,7 +12,7 @@ Provides a Spotinst AWS group resource. ## Example Usage -``` +```hcl # Create an AWS group resource "spotinst_aws_group" "workers" { name = "workers-group" diff --git a/website/source/docs/providers/statuscake/index.html.markdown b/website/source/docs/providers/statuscake/index.html.markdown index 9a4d0fb29..d25a86654 100644 --- a/website/source/docs/providers/statuscake/index.html.markdown +++ b/website/source/docs/providers/statuscake/index.html.markdown @@ -23,7 +23,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl provider "statuscake" { username = "testuser" apikey = "12345ddfnakn" diff --git a/website/source/docs/providers/statuscake/r/test.html.markdown b/website/source/docs/providers/statuscake/r/test.html.markdown index 7623d0d46..0a1df9f12 100644 --- a/website/source/docs/providers/statuscake/r/test.html.markdown +++ b/website/source/docs/providers/statuscake/r/test.html.markdown @@ -12,7 +12,7 @@ The test resource allows StatusCake tests to be managed by Terraform. 
## Example Usage -``` +```hcl resource "statuscake_test" "google" { website_name = "google.com" website_url = "www.google.com" diff --git a/website/source/docs/providers/triton/index.html.markdown b/website/source/docs/providers/triton/index.html.markdown index 1660ad718..7dc866d27 100644 --- a/website/source/docs/providers/triton/index.html.markdown +++ b/website/source/docs/providers/triton/index.html.markdown @@ -14,7 +14,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl provider "triton" { account = "AccountName" key_id = "25:d4:a9:fe:ef:e6:c0:bf:b4:4b:4b:d4:a8:8f:01:0f" diff --git a/website/source/docs/providers/triton/r/triton_fabric.html.markdown b/website/source/docs/providers/triton/r/triton_fabric.html.markdown index 609690cd7..cd783d87d 100644 --- a/website/source/docs/providers/triton/r/triton_fabric.html.markdown +++ b/website/source/docs/providers/triton/r/triton_fabric.html.markdown @@ -14,8 +14,7 @@ The `triton_fabric` resource represents a fabric for a Triton account. The fabr ### Create a fabric - -``` +```hcl resource "triton_fabric" "dmz" { vlan_id = 100 name = "dmz" @@ -32,7 +31,6 @@ resource "triton_fabric" "dmz" { The following arguments are supported: - * `name` - (String, Required, Change forces new resource) Network name. @@ -63,9 +61,6 @@ The following arguments are supported: * `vlan_id` - (Int, Required, Change forces new resource) VLAN id the network is on. Number between 0-4095 indicating VLAN ID. - - - ## Attribute Reference The following attributes are exported: @@ -82,7 +77,3 @@ The following attributes are exported: * `routes` - (Map) - Map of CIDR block to Gateway IP address. * `internet_nat` - (Bool) - If a NAT zone is provisioned at Gateway IP address. * `vlan_id` - (Int) - VLAN id the network is on. Number between 0-4095 indicating VLAN ID. - - - - diff --git a/website/source/docs/providers/triton/r/triton_firewall_rule.html.markdown b/website/source/docs/providers/triton/r/triton_firewall_rule.html.markdown index 1bb815f33..0e09eb8ab 100644 --- a/website/source/docs/providers/triton/r/triton_firewall_rule.html.markdown +++ b/website/source/docs/providers/triton/r/triton_firewall_rule.html.markdown @@ -14,38 +14,31 @@ The `triton_firewall_rule` resource represents a rule for the Triton cloud firew ### Allow web traffic on ports tcp/80 and tcp/443 to machines with the 'www' tag from any source - -``` +```hcl resource "triton_firewall_rule" "www" { rule = "FROM any TO tag www ALLOW tcp (PORT 80 AND PORT 443)" enabled = true } ``` - ### Allow ssh traffic on port tcp/22 to all machines from known remote IPs - -``` +```hcl resource "triton_firewall_rule" "22" { rule = "FROM IP (IP w.x.y.z OR IP w.x.y.z) TO all vms ALLOW tcp port 22" enabled = true } ``` - - ### Block IMAP traffic on port tcp/143 to all machines -``` +```hcl resource "triton_firewall_rule" "imap" { rule = "FROM any TO all vms BLOCK tcp port 143" enabled = true } ``` - - ## Argument Reference The following arguments are supported: diff --git a/website/source/docs/providers/triton/r/triton_key.html.markdown b/website/source/docs/providers/triton/r/triton_key.html.markdown index 4d18fdc64..83763880e 100644 --- a/website/source/docs/providers/triton/r/triton_key.html.markdown +++ b/website/source/docs/providers/triton/r/triton_key.html.markdown @@ -14,8 +14,7 @@ The `triton_key` resource represents an SSH key for a Triton account.
Create a key - -``` +```hcl resource "triton_key" "example" { name = "Example Key" key = "${file("keys/id_rsa")}" @@ -31,4 +30,3 @@ The following arguments are supported: * `key` - (string, Required, Change forces new resource) The SSH key material. In order to read this from a file, use the `file` interpolation. - diff --git a/website/source/docs/providers/triton/r/triton_machine.html.markdown b/website/source/docs/providers/triton/r/triton_machine.html.markdown index c5f4d851d..f01fd8005 100644 --- a/website/source/docs/providers/triton/r/triton_machine.html.markdown +++ b/website/source/docs/providers/triton/r/triton_machine.html.markdown @@ -14,8 +14,7 @@ The `triton_machine` resource represents a virtual machine or infrastructure con ### Run a SmartOS base-64 machine. - -``` +```hcl resource "triton_machine" "test-smartos" { name = "test-smartos" package = "g3-standard-0.25-smartos" @@ -29,7 +28,7 @@ resource "triton_machine" "test-smartos" { ### Run an Ubuntu 14.04 LTS machine. -``` +```hcl resource "triton_machine" "test-ubuntu" { name = "test-ubuntu" package = "g4-general-4G" @@ -44,8 +43,6 @@ resource "triton_machine" "test-ubuntu" { } ## resource ``` - - ## Argument Reference The following arguments are supported: diff --git a/website/source/docs/providers/triton/r/triton_vlan.html.markdown b/website/source/docs/providers/triton/r/triton_vlan.html.markdown index 838cc4393..a7515f3f7 100644 --- a/website/source/docs/providers/triton/r/triton_vlan.html.markdown +++ b/website/source/docs/providers/triton/r/triton_vlan.html.markdown @@ -14,8 +14,7 @@ The `triton_vlan` resource represents a Triton VLAN. A VLAN provides a low leve ### Create a VLAN - -``` +```hcl resource "triton_vlan" "dmz" { vlan_id = 100 name = "dmz" diff --git a/website/source/docs/providers/ultradns/index.html.markdown b/website/source/docs/providers/ultradns/index.html.markdown index e01267526..3d1a9b0f7 100644 --- a/website/source/docs/providers/ultradns/index.html.markdown +++ b/website/source/docs/providers/ultradns/index.html.markdown @@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources. ## Example Usage -``` +```hcl # Configure the UltraDNS provider provider "ultradns" { username = "${var.ultradns_username}" diff --git a/website/source/docs/providers/ultradns/r/dirpool.html.markdown b/website/source/docs/providers/ultradns/r/dirpool.html.markdown index 4faf3c2ab..c0377ad9d 100644 --- a/website/source/docs/providers/ultradns/r/dirpool.html.markdown +++ b/website/source/docs/providers/ultradns/r/dirpool.html.markdown @@ -11,7 +11,8 @@ description: |- Provides an UltraDNS Directional Controller pool resource.
 ## Example Usage
-```
+
+```hcl
 # Create a Directional Controller pool
 resource "ultradns_dirpool" "pool" {
   zone = "${var.ultradns_domain}"
diff --git a/website/source/docs/providers/ultradns/r/probe_http.html.markdown b/website/source/docs/providers/ultradns/r/probe_http.html.markdown
index 72a57a3e3..80e7847c7 100644
--- a/website/source/docs/providers/ultradns/r/probe_http.html.markdown
+++ b/website/source/docs/providers/ultradns/r/probe_http.html.markdown
@@ -11,7 +11,8 @@ description: |-
 Provides an UltraDNS HTTP probe
 
 ## Example Usage
-```
+
+```hcl
 resource "ultradns_probe_http" "probe" {
   zone = "${ultradns_tcpool.pool.zone}"
   name = "${ultradns_tcpool.pool.name}"
diff --git a/website/source/docs/providers/ultradns/r/probe_ping.html.markdown b/website/source/docs/providers/ultradns/r/probe_ping.html.markdown
index 0c0b3a79a..e680e255b 100644
--- a/website/source/docs/providers/ultradns/r/probe_ping.html.markdown
+++ b/website/source/docs/providers/ultradns/r/probe_ping.html.markdown
@@ -11,7 +11,8 @@ description: |-
 Provides an UltraDNS ping probe
 
 ## Example Usage
-```
+
+```hcl
 resource "ultradns_probe_ping" "probe" {
   zone = "${ultradns_tcpool.pool.zone}"
   name = "${ultradns_tcpool.pool.name}"
diff --git a/website/source/docs/providers/ultradns/r/record.html.markdown b/website/source/docs/providers/ultradns/r/record.html.markdown
index 9b9ed428d..ca8da9dfb 100644
--- a/website/source/docs/providers/ultradns/r/record.html.markdown
+++ b/website/source/docs/providers/ultradns/r/record.html.markdown
@@ -12,7 +12,7 @@ Provides an UltraDNS record resource.
 
 ## Example Usage
 
-```
+```hcl
 # Add a record to the domain
 resource "ultradns_record" "foobar" {
   zone = "${var.ultradns_domain}"
diff --git a/website/source/docs/providers/ultradns/r/tcpool.html.markdown b/website/source/docs/providers/ultradns/r/tcpool.html.markdown
index a550f15e2..b9295cfe6 100644
--- a/website/source/docs/providers/ultradns/r/tcpool.html.markdown
+++ b/website/source/docs/providers/ultradns/r/tcpool.html.markdown
@@ -11,7 +11,8 @@ description: |-
 Provides an UltraDNS Traffic Controller pool resource.
 
 ## Example Usage
-```
+
+```hcl
 # Create a Traffic Controller pool
 resource "ultradns_tcpool" "pool" {
   zone = "${var.ultradns_domain}"
diff --git a/website/source/docs/providers/vcd/index.html.markdown b/website/source/docs/providers/vcd/index.html.markdown
index 4398fc943..de0be34ae 100644
--- a/website/source/docs/providers/vcd/index.html.markdown
+++ b/website/source/docs/providers/vcd/index.html.markdown
@@ -16,7 +16,7 @@ Use the navigation to the left to read about the available resources.
 
 ## Example Usage
 
-```
+```hcl
 # Configure the VMware vCloud Director Provider
 provider "vcd" {
   user = "${var.vcd_user}"
diff --git a/website/source/docs/providers/vcd/r/dnat.html.markdown b/website/source/docs/providers/vcd/r/dnat.html.markdown
index 3ee1ab505..6f18fb9af 100644
--- a/website/source/docs/providers/vcd/r/dnat.html.markdown
+++ b/website/source/docs/providers/vcd/r/dnat.html.markdown
@@ -13,7 +13,7 @@ and delete destination NATs to map an external IP/port to a VM.
 
 ## Example Usage
 
-```
+```hcl
 resource "vcd_dnat" "web" {
   edge_gateway = "Edge Gateway Name"
   external_ip = "78.101.10.20"
diff --git a/website/source/docs/providers/vcd/r/firewall_rules.html.markdown b/website/source/docs/providers/vcd/r/firewall_rules.html.markdown
index 01edc5110..a25a288fb 100644
--- a/website/source/docs/providers/vcd/r/firewall_rules.html.markdown
+++ b/website/source/docs/providers/vcd/r/firewall_rules.html.markdown
@@ -13,7 +13,7 @@ modify, and delete firewall settings and rules.
 
 ## Example Usage
 
-```
+```hcl
 resource "vcd_firewall_rules" "fw" {
   edge_gateway = "Edge Gateway Name"
   default_action = "drop"
diff --git a/website/source/docs/providers/vcd/r/network.html.markdown b/website/source/docs/providers/vcd/r/network.html.markdown
index e53136e5a..85aafceaf 100644
--- a/website/source/docs/providers/vcd/r/network.html.markdown
+++ b/website/source/docs/providers/vcd/r/network.html.markdown
@@ -13,7 +13,7 @@ modify, and delete internal networks for vApps to connect.
 
 ## Example Usage
 
-```
+```hcl
 resource "vcd_network" "net" {
   name = "my-net"
   edge_gateway = "Edge Gateway Name"
diff --git a/website/source/docs/providers/vcd/r/snat.html.markdown b/website/source/docs/providers/vcd/r/snat.html.markdown
index 3fe59bbcd..6c9903aca 100644
--- a/website/source/docs/providers/vcd/r/snat.html.markdown
+++ b/website/source/docs/providers/vcd/r/snat.html.markdown
@@ -13,7 +13,7 @@ and delete source NATs to allow vApps to send external traffic.
 
 ## Example Usage
 
-```
+```hcl
 resource "vcd_snat" "outbound" {
   edge_gateway = "Edge Gateway Name"
   external_ip = "78.101.10.20"
diff --git a/website/source/docs/providers/vcd/r/vapp.html.markdown b/website/source/docs/providers/vcd/r/vapp.html.markdown
index 7b2d27e32..bed73fba9 100644
--- a/website/source/docs/providers/vcd/r/vapp.html.markdown
+++ b/website/source/docs/providers/vcd/r/vapp.html.markdown
@@ -13,7 +13,7 @@ modify, and delete vApps.
 
 ## Example Usage
 
-```
+```hcl
 resource "vcd_network" "net" {
   # ...
 }
diff --git a/website/source/docs/providers/vsphere/index.html.markdown b/website/source/docs/providers/vsphere/index.html.markdown
index 34fbcd5ff..46fdaee55 100644
--- a/website/source/docs/providers/vsphere/index.html.markdown
+++ b/website/source/docs/providers/vsphere/index.html.markdown
@@ -22,7 +22,7 @@ provider at this time only supports IPv4 addresses on virtual machines.
 
 ## Example Usage
 
-```
+```hcl
 # Configure the VMware vSphere Provider
 provider "vsphere" {
   user = "${var.vsphere_user}"
diff --git a/website/source/docs/providers/vsphere/r/file.html.markdown b/website/source/docs/providers/vsphere/r/file.html.markdown
index cd2824e77..5ac5b55ab 100644
--- a/website/source/docs/providers/vsphere/r/file.html.markdown
+++ b/website/source/docs/providers/vsphere/r/file.html.markdown
@@ -16,7 +16,7 @@ Updates to file resources will handle moving a file to a new destination (datace
 
 **Upload file to vSphere:**
 
-```
+```hcl
 resource "vsphere_file" "ubuntu_disk_upload" {
   datacenter = "my_datacenter"
   datastore = "local"
@@ -27,7 +27,7 @@ resource "vsphere_file" "ubuntu_disk_upload" {
 
 **Copy file within vSphere:**
 
-```
+```hcl
 resource "vsphere_file" "ubuntu_disk_copy" {
   source_datacenter = "my_datacenter"
   datacenter = "my_datacenter"
diff --git a/website/source/docs/providers/vsphere/r/folder.html.markdown b/website/source/docs/providers/vsphere/r/folder.html.markdown
index a4a490cdd..47cb880b0 100644
--- a/website/source/docs/providers/vsphere/r/folder.html.markdown
+++ b/website/source/docs/providers/vsphere/r/folder.html.markdown
@@ -12,7 +12,7 @@ Provides a VMware vSphere virtual machine folder resource. This can be used to c
 
 ## Example Usage
 
-```
+```hcl
 resource "vsphere_folder" "web" {
   path = "terraform_web_folder"
 }
diff --git a/website/source/docs/providers/vsphere/r/virtual_disk.html.markdown b/website/source/docs/providers/vsphere/r/virtual_disk.html.markdown
index b6f97374c..deb5221e0 100644
--- a/website/source/docs/providers/vsphere/r/virtual_disk.html.markdown
+++ b/website/source/docs/providers/vsphere/r/virtual_disk.html.markdown
@@ -12,7 +12,7 @@ Provides a VMware virtual disk resource. This can be used to create and delete
 
 ## Example Usage
 
-```
+```hcl
 resource "vsphere_virtual_disk" "myDisk" {
   size = 2
   vmdk_path = "myDisk.vmdk"
diff --git a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
index d1aa56c5b..05f609cef 100644
--- a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
+++ b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
@@ -13,7 +13,7 @@ modify, and delete virtual machines.
 
 ## Example Usage
 
-```
+```hcl
 resource "vsphere_virtual_machine" "web" {
   name = "terraform-web"
   vcpu = 2
@@ -31,7 +31,7 @@ resource "vsphere_virtual_machine" "web" {
 
 ## Example Usage VMware Cluster
 
-```
+```hcl
 resource "vsphere_virtual_machine" "lb" {
   name = "lb01"
   folder = "Loadbalancers"

From b3cc7ea4cf0c824f509756d5a26d1e56ac187f78 Mon Sep 17 00:00:00 2001
From: Paul Stack
Date: Mon, 17 Apr 2017 16:00:41 +0300
Subject: [PATCH 173/342] provider/aws: Add support for aws_dms_replication_task available states (#13697)

---
 builtin/providers/aws/resource_aws_dms_replication_task.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/providers/aws/resource_aws_dms_replication_task.go b/builtin/providers/aws/resource_aws_dms_replication_task.go
index f137c186c..ab10eedbc 100644
--- a/builtin/providers/aws/resource_aws_dms_replication_task.go
+++ b/builtin/providers/aws/resource_aws_dms_replication_task.go
@@ -226,7 +226,7 @@ func resourceAwsDmsReplicationTaskUpdate(d *schema.ResourceData, meta interface{
 
 	stateConf := &resource.StateChangeConf{
 		Pending:    []string{"modifying"},
-		Target:     []string{"ready"},
+		Target:     []string{"ready", "stopped", "failed"},
 		Refresh:    resourceAwsDmsReplicationTaskStateRefreshFunc(d, meta),
 		Timeout:    d.Timeout(schema.TimeoutCreate),
 		MinTimeout: 10 * time.Second,
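The one-line change above works because of how the helper/resource waiter treats its state lists: Refresh is polled until the reported state leaves Pending and lands in Target, so a task that legitimately settled in `stopped` or `failed` used to spin until Timeout. A minimal, self-contained sketch of the same wiring; the describe callback is a hypothetical stand-in for the DMS refresh function, not the provider's actual code:

```go
package example

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

// waitForTask blocks until describe reports one of the Target states.
// A state still listed in Pending keeps the poll loop going; any other
// state is reported by the waiter as an unexpected-state error.
func waitForTask(describe func() (string, error)) error {
	stateConf := &resource.StateChangeConf{
		Pending: []string{"modifying"},
		Target:  []string{"ready", "stopped", "failed"},
		Refresh: func() (interface{}, string, error) {
			status, err := describe()
			if err != nil {
				return nil, "", err
			}
			// A non-nil first return value means "the object still
			// exists"; the string drives the state machine.
			return status, status, nil
		},
		Timeout:    10 * time.Minute,
		MinTimeout: 10 * time.Second,
	}
	if _, err := stateConf.WaitForState(); err != nil {
		return fmt.Errorf("error waiting for replication task: %s", err)
	}
	return nil
}
```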
From bb69e3730b9c6958cd12cbb86683629d27b6ee03 Mon Sep 17 00:00:00 2001
From: Paul Stack
Date: Mon, 17 Apr 2017 16:26:08 +0300
Subject: [PATCH 174/342] provider/aws: Documentation fixed for elasticsearch_domain (#13704)

Fixes: #13434
---
 .../docs/providers/aws/r/elasticsearch_domain.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown b/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown
index cf18732e7..050552795 100644
--- a/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown
+++ b/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown
@@ -20,7 +20,7 @@ resource "aws_elasticsearch_domain" "es" {
   }
 
   advanced_options {
-    "rest.action.multi.allow_explicit_index" = true
+    "rest.action.multi.allow_explicit_index" = "true"
   }
 
   access_policies = <
Date: Mon, 17 Apr 2017 22:35:27 +0900
Subject: [PATCH 175/342] Add auto_minor_version_upgrade document (#13706)

---
 .../docs/providers/aws/r/rds_cluster_instance.html.markdown | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown
index 031972d4b..782e8c161 100644
--- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown
+++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown
@@ -75,6 +75,7 @@ what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances.
   Eg: "04:00-09:00"
 * `preferred_maintenance_window` - (Optional) The window to perform maintenance in.
   Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
+* `auto_minor_version_upgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. Default `true`.
 * `tags` - (Optional) A mapping of tags to assign to the instance.
 
 ## Attributes Reference

From 6f9570b114a4a1a6e35010fce263caa7d0cab3a1 Mon Sep 17 00:00:00 2001
From: =
Date: Mon, 17 Apr 2017 09:12:42 -0600
Subject: [PATCH 176/342] Fixes TestAccAWSAutoscalingLifecycleHook_omitDefaultResult

---
 .../aws/resource_aws_autoscaling_lifecycle_hook_test.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook_test.go b/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook_test.go
index 7fece49a2..580c2ed55 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook_test.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook_test.go
@@ -202,7 +202,7 @@ resource "aws_launch_configuration" "foobar" {
 }
 
 resource "aws_sqs_queue" "foobar" {
-  name = "foobar"
+  name = "foobar-%d"
   delay_seconds = 90
   max_message_size = 2048
   message_retention_seconds = 86400
@@ -225,7 +225,7 @@ EOF
 }
 
 resource "aws_iam_role_policy" "foobar" {
-  name = "foobar"
+  name = "foobar-%d"
  role = "${aws_iam_role.foobar.id}"
  policy = <
Date: Mon, 17 Apr 2017 11:35:56 -0400
Subject: [PATCH 177/342] provider/aws: Run AWS Spot Datafeed Subscription tests in serial

---
 ...ort_aws_spot_datafeed_subscription_test.go |  6 ++---
 ...rce_aws_spot_datafeed_subscription_test.go | 22 +++++++++++++++----
 2 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/builtin/providers/aws/import_aws_spot_datafeed_subscription_test.go b/builtin/providers/aws/import_aws_spot_datafeed_subscription_test.go
index 8d60f3994..24c7acc59 100644
--- a/builtin/providers/aws/import_aws_spot_datafeed_subscription_test.go
+++ b/builtin/providers/aws/import_aws_spot_datafeed_subscription_test.go
@@ -7,7 +7,7 @@ import (
 	"github.com/hashicorp/terraform/helper/resource"
 )
 
-func TestAccAWSSpotDatafeedSubscription_importBasic(t *testing.T) {
+func testAccAWSSpotDatafeedSubscription_importBasic(t *testing.T) {
 	resourceName := "aws_spot_datafeed_subscription.default"
 	ri := acctest.RandInt()
 
@@ -16,11 +16,11 @@ func TestAccAWSSpotDatafeedSubscription_importBasic(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSpotDatafeedSubscriptionDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSpotDatafeedSubscription(ri),
 			},
 
-			resource.TestStep{
+			{
 				ResourceName:      resourceName,
 				ImportState:       true,
 				ImportStateVerify: true,
diff --git a/builtin/providers/aws/resource_aws_spot_datafeed_subscription_test.go b/builtin/providers/aws/resource_aws_spot_datafeed_subscription_test.go
index d12e7e765..b05c691e8 100644
--- a/builtin/providers/aws/resource_aws_spot_datafeed_subscription_test.go
+++ b/builtin/providers/aws/resource_aws_spot_datafeed_subscription_test.go
@@ -12,7 +12,21 @@ import (
 	"github.com/hashicorp/terraform/terraform"
 )
 
-func TestAccAWSSpotDatafeedSubscription_basic(t *testing.T) {
+func TestAccAWSSpotDatafeedSubscription(t *testing.T) {
+	cases := map[string]func(t *testing.T){
+		"basic":      testAccAWSSpotDatafeedSubscription_basic,
+		"disappears": testAccAWSSpotDatafeedSubscription_disappears,
+		"import":     testAccAWSSpotDatafeedSubscription_importBasic,
+	}
+
+	for name, tc := range cases {
+		t.Run(name, func(t *testing.T) {
+			tc(t)
+		})
+	}
+}
+
+func testAccAWSSpotDatafeedSubscription_basic(t *testing.T) {
 	var subscription ec2.SpotDatafeedSubscription
 	ri := acctest.RandInt()
 
@@ -21,7 +35,7 @@ func TestAccAWSSpotDatafeedSubscription_basic(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSpotDatafeedSubscriptionDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSpotDatafeedSubscription(ri),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSpotDatafeedSubscriptionExists("aws_spot_datafeed_subscription.default", &subscription),
@@ -55,7 +69,7 @@ func testAccCheckAWSSpotDatafeedSubscriptionDisappears(subscription *ec2.SpotDat
 	}
 }
 
-func TestAccAWSSpotDatafeedSubscription_disappears(t *testing.T) {
+func testAccAWSSpotDatafeedSubscription_disappears(t *testing.T) {
 	var subscription ec2.SpotDatafeedSubscription
 	ri := acctest.RandInt()
 
@@ -64,7 +78,7 @@ func testAccAWSSpotDatafeedSubscription_disappears(t *testing.T) {
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSSpotDatafeedSubscriptionDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: testAccAWSSpotDatafeedSubscription(ri),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSpotDatafeedSubscriptionExists("aws_spot_datafeed_subscription.default", &subscription),
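Only one spot datafeed subscription can exist per AWS account, so the former independent top-level tests could collide when runs overlapped. Folding them into a single exported test with lowercased helpers leaves one entry point, and the subtests execute strictly one after another. The skeleton of the pattern, with placeholder subtest names:

```go
package example

import "testing"

// TestSingletonResource is the only exported entry point; the go test
// runner can no longer select the individual cases separately, and the
// subtests run sequentially because none of them call t.Parallel.
func TestSingletonResource(t *testing.T) {
	cases := map[string]func(t *testing.T){
		"basic":      testBasic,
		"disappears": testDisappears,
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			tc(t)
		})
	}
}

func testBasic(t *testing.T)      { /* placeholder */ }
func testDisappears(t *testing.T) { /* placeholder */ }
```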
From 282e56cba83be12d3ef8e7bd12a8d5a5f382cb87 Mon Sep 17 00:00:00 2001
From: Adam Byrtek
Date: Mon, 17 Apr 2017 15:36:13 +0000
Subject: [PATCH 178/342] Shell completions for fish

---
 contrib/fish-completion/README.md      |  10 ++
 contrib/fish-completion/terraform.fish | 171 +++++++++++++++++++++++++
 2 files changed, 181 insertions(+)
 create mode 100644 contrib/fish-completion/README.md
 create mode 100644 contrib/fish-completion/terraform.fish

diff --git a/contrib/fish-completion/README.md b/contrib/fish-completion/README.md
new file mode 100644
index 000000000..a50ed1e81
--- /dev/null
+++ b/contrib/fish-completion/README.md
@@ -0,0 +1,10 @@
+# Terraform fish shell completion
+
+Copy the completions to your local fish configuration:
+
+```
+mkdir -p ~/.config/fish/completions
+cp terraform.fish ~/.config/fish/completions
+```
+
+Please note that these completions have been merged upstream and should be bundled with fish 2.6 or later.
diff --git a/contrib/fish-completion/terraform.fish b/contrib/fish-completion/terraform.fish
new file mode 100644
index 000000000..41f3660f7
--- /dev/null
+++ b/contrib/fish-completion/terraform.fish
@@ -0,0 +1,171 @@
+# general options
+complete -f -c terraform -l version -d 'Print version information'
+complete -f -c terraform -l help -d 'Show help'
+
+### apply
+complete -f -c terraform -n '__fish_use_subcommand' -a apply -d 'Build or change infrastructure'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o backup -d 'Path to backup the existing state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o lock -d 'Lock the state file when locking is supported'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o lock-timeout -d 'Duration to retry a state lock'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o input -d 'Ask for input for variables if not directly set'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o no-color -d 'If specified, output won\'t contain any color'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o parallelism -d 'Limit the number of concurrent operations'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o refresh -d 'Update state prior to checking for differences'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o state -d 'Path to a Terraform state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o state-out -d 'Path to write state'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o target -d 'Resource to target'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o var -d 'Set a variable in the Terraform configuration'
+complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o var-file -d 'Set variables from a file'
+
+### console
+complete -f -c terraform -n '__fish_use_subcommand' -a console -d 'Interactive console for Terraform interpolations'
+complete -f -c terraform -n '__fish_seen_subcommand_from console' -o state -d 'Path to a Terraform state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from console' -o var -d 'Set a variable in the Terraform configuration'
+complete -f -c terraform -n '__fish_seen_subcommand_from console' -o var-file -d 'Set variables from a file'
+
+### destroy
+complete -f -c terraform -n '__fish_use_subcommand' -a destroy -d 'Destroy Terraform-managed infrastructure'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o backup -d 'Path to backup the existing state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o force -d 'Don\'t ask for input for destroy confirmation'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o lock -d 'Lock the state file when locking is supported'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o lock-timeout -d 'Duration to retry a state lock'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o no-color -d 'If specified, output won\'t contain any color'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o parallelism -d 'Limit the number of concurrent operations'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o refresh -d 'Update state prior to checking for differences'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o state -d 'Path to a Terraform state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o state-out -d 'Path to write state'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o target -d 'Resource to target'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o var -d 'Set a variable in the Terraform configuration'
+complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o var-file -d 'Set variables from a file'
+
+### env
+complete -f -c terraform -n '__fish_use_subcommand' -a env -d 'Environment management'
+complete -f -c terraform -n '__fish_seen_subcommand_from env' -a list -d 'List environments'
+complete -f -c terraform -n '__fish_seen_subcommand_from env' -a select -d 'Select an environment'
+complete -f -c terraform -n '__fish_seen_subcommand_from env' -a new -d 'Create a new environment'
+complete -f -c terraform -n '__fish_seen_subcommand_from env' -a delete -d 'Delete an existing environment'
+
+### fmt
+complete -f -c terraform -n '__fish_use_subcommand' -a fmt -d 'Rewrite config files to canonical format'
+complete -f -c terraform -n '__fish_seen_subcommand_from fmt' -o list -d 'List files whose formatting differs'
+complete -f -c terraform -n '__fish_seen_subcommand_from fmt' -o write -d 'Write result to source file'
+complete -f -c terraform -n '__fish_seen_subcommand_from fmt' -o diff -d 'Display diffs of formatting changes'
+
+### get
+complete -f -c terraform -n '__fish_use_subcommand' -a get -d 'Download and install modules for the configuration'
+complete -f -c terraform -n '__fish_seen_subcommand_from get' -o update -d 'Check modules for updates'
+complete -f -c terraform -n '__fish_seen_subcommand_from get' -o no-color -d 'If specified, output won\'t contain any color'
+
+### graph
+complete -f -c terraform -n '__fish_use_subcommand' -a graph -d 'Create a visual graph of Terraform resources'
+complete -f -c terraform -n '__fish_seen_subcommand_from graph' -o draw-cycles -d 'Highlight any cycles in the graph'
+complete -f -c terraform -n '__fish_seen_subcommand_from graph' -o no-color -d 'If specified, output won\'t contain any color'
+complete -f -c terraform -n '__fish_seen_subcommand_from graph' -o type -d 'Type of graph to output'
+
+### import
+complete -f -c terraform -n '__fish_use_subcommand' -a import -d 'Import existing infrastructure into Terraform'
+complete -f -c terraform -n '__fish_seen_subcommand_from import' -o backup -d 'Path to backup the existing state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from import' -o config -d 'Path to a directory of configuration files'
+complete -f -c terraform -n '__fish_seen_subcommand_from import' -o input -d 'Ask for input for variables if not directly set'
+complete -f -c terraform -n '__fish_seen_subcommand_from import' -o lock -d 'Lock the state file when locking is supported'
+complete -f -c terraform -n '__fish_seen_subcommand_from import' -o lock-timeout -d 'Duration to retry a state lock'
+complete -f -c terraform -n '__fish_seen_subcommand_from import' -o no-color -d 'If specified, output won\'t contain any color'
+complete -f -c terraform -n '__fish_seen_subcommand_from import' -o provider -d 'Specific provider to use for import'
+complete -f -c terraform -n '__fish_seen_subcommand_from import' -o state -d 'Path to a Terraform state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from import' -o state-out -d 'Path to write state'
+complete -f -c terraform -n '__fish_seen_subcommand_from import' -o var -d 'Set a variable in the Terraform configuration'
+complete -f -c terraform -n '__fish_seen_subcommand_from import' -o var-file -d 'Set variables from a file'
+
+### init
+complete -f -c terraform -n '__fish_use_subcommand' -a init -d 'Initialize a new or existing Terraform configuration'
+complete -f -c terraform -n '__fish_seen_subcommand_from init' -o backend -d 'Configure the backend for this environment'
+complete -f -c terraform -n '__fish_seen_subcommand_from init' -o backend-config -d 'Backend configuration'
+complete -f -c terraform -n '__fish_seen_subcommand_from init' -o get -d 'Download modules for this configuration'
+complete -f -c terraform -n '__fish_seen_subcommand_from init' -o input -d 'Ask for input if necessary'
+complete -f -c terraform -n '__fish_seen_subcommand_from init' -o lock -d 'Lock the state file when locking is supported'
+complete -f -c terraform -n '__fish_seen_subcommand_from init' -o lock-timeout -d 'Duration to retry a state lock'
+complete -f -c terraform -n '__fish_seen_subcommand_from init' -o no-color -d 'If specified, output won\'t contain any color'
+complete -f -c terraform -n '__fish_seen_subcommand_from init' -o force-copy -d 'Suppress prompts about copying state data'
+
+### output
+complete -f -c terraform -n '__fish_use_subcommand' -a output -d 'Read an output from a state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from output' -o state -d 'Path to the state file to read'
+complete -f -c terraform -n '__fish_seen_subcommand_from output' -o no-color -d 'If specified, output won\'t contain any color'
+complete -f -c terraform -n '__fish_seen_subcommand_from output' -o module -d 'Return the outputs for a specific module'
+complete -f -c terraform -n '__fish_seen_subcommand_from output' -o json -d 'Print output in JSON format'
+
+### plan
+complete -f -c terraform -n '__fish_use_subcommand' -a plan -d 'Generate and show an execution plan'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o destroy -d 'Generate a plan to destroy all resources'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o detailed-exitcode -d 'Return detailed exit codes'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o input -d 'Ask for input for variables if not directly set'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o lock -d 'Lock the state file when locking is supported'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o lock-timeout -d 'Duration to retry a state lock'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o module-depth -d 'Depth of modules to show in the output'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o no-color -d 'If specified, output won\'t contain any color'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o out -d 'Write a plan file to the given path'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o parallelism -d 'Limit the number of concurrent operations'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o refresh -d 'Update state prior to checking for differences'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o state -d 'Path to a Terraform state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o target -d 'Resource to target'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o var -d 'Set a variable in the Terraform configuration'
+complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o var-file -d 'Set variables from a file'
+
+### push
+complete -f -c terraform -n '__fish_use_subcommand' -a push -d 'Upload this Terraform module to Atlas to run'
+complete -f -c terraform -n '__fish_seen_subcommand_from push' -o atlas-address -d 'An alternate address to an Atlas instance'
+complete -f -c terraform -n '__fish_seen_subcommand_from push' -o upload-modules -d 'Lock modules and upload completely'
+complete -f -c terraform -n '__fish_seen_subcommand_from push' -o name -d 'Name of the configuration in Atlas'
+complete -f -c terraform -n '__fish_seen_subcommand_from push' -o token -d 'Access token to use to upload'
+complete -f -c terraform -n '__fish_seen_subcommand_from push' -o overwrite -d 'Variable keys that should overwrite values in Atlas'
+complete -f -c terraform -n '__fish_seen_subcommand_from push' -o var -d 'Set a variable in the Terraform configuration'
+complete -f -c terraform -n '__fish_seen_subcommand_from push' -o var-file -d 'Set variables from a file'
+complete -f -c terraform -n '__fish_seen_subcommand_from push' -o vcs -d 'Upload only files committed to your VCS'
+complete -f -c terraform -n '__fish_seen_subcommand_from push' -o no-color -d 'If specified, output won\'t contain any color'
+
+### refresh
+complete -f -c terraform -n '__fish_use_subcommand' -a refresh -d 'Update local state file against real resources'
+complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o backup -d 'Path to backup the existing state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o input -d 'Ask for input for variables if not directly set'
+complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o lock -d 'Lock the state file when locking is supported'
+complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o lock-timeout -d 'Duration to retry a state lock'
+complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o no-color -d 'If specified, output won\'t contain any color'
+complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o state -d 'Path to a Terraform state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o state-out -d 'Path to write state'
+complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o target -d 'Resource to target'
+complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o var -d 'Set a variable in the Terraform configuration'
+complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o var-file -d 'Set variables from a file'
+
+### show
+complete -f -c terraform -n '__fish_use_subcommand' -a show -d 'Inspect Terraform state or plan'
+complete -f -c terraform -n '__fish_seen_subcommand_from show' -o module-depth -d 'Depth of modules to show in the output'
+complete -f -c terraform -n '__fish_seen_subcommand_from show' -o no-color -d 'If specified, output won\'t contain any color'
+
+### taint
+complete -f -c terraform -n '__fish_use_subcommand' -a taint -d 'Manually mark a resource for recreation'
+complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o allow-missing -d 'Succeed even if resource is missing'
+complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o backup -d 'Path to backup the existing state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o lock -d 'Lock the state file when locking is supported'
+complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o lock-timeout -d 'Duration to retry a state lock'
+complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o module -d 'The module path where the resource lives'
+complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o no-color -d 'If specified, output won\'t contain any color'
+complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o state -d 'Path to a Terraform state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o state-out -d 'Path to write state'
+
+### untaint
+complete -f -c terraform -n '__fish_use_subcommand' -a untaint -d 'Manually unmark a resource as tainted'
+complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o allow-missing -d 'Succeed even if resource is missing'
+complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o backup -d 'Path to backup the existing state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o lock -d 'Lock the state file when locking is supported'
+complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o lock-timeout -d 'Duration to retry a state lock'
+complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o module -d 'The module path where the resource lives'
+complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o no-color -d 'If specified, output won\'t contain any color'
+complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o state -d 'Path to a Terraform state file'
+complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o state-out -d 'Path to write state'
+
+### validate
+complete -f -c terraform -n '__fish_use_subcommand' -a validate -d 'Validate the Terraform files'
+complete -f -c terraform -n '__fish_seen_subcommand_from validate' -o no-color -d 'If specified, output won\'t contain any color'
+
+### version
+complete -f -c terraform -n '__fish_use_subcommand' -a version -d 'Print the Terraform version'

From 4d79e0b99c237344a10ff7759d62f777d560c5bd Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Mon, 17 Apr 2017 10:45:10 -0700
Subject: [PATCH 179/342] website: documentation tweaks for the local_file resource and its provider

---
 .../docs/providers/local/index.html.markdown | 15 ++++++-------
 .../docs/providers/local/r/file.html.md      | 21 ++++++++++++-------
 2 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/website/source/docs/providers/local/index.html.markdown b/website/source/docs/providers/local/index.html.markdown
index cc83fd241..8c007c15a 100644
--- a/website/source/docs/providers/local/index.html.markdown
+++ b/website/source/docs/providers/local/index.html.markdown
@@ -3,17 +3,18 @@ layout: "local"
 page_title: "Provider: Local"
 sidebar_current: "docs-local-index"
 description: |-
-  The Local provider is used to manage local resources (i.e. files).
+  The Local provider is used to manage local resources, such as files.
 ---
 
 # Local Provider
 
-The Local provider is used to manage local resources (i.e. files).
+The Local provider is used to manage local resources, such as files.
 
 Use the navigation to the left to read about the available resources.
 
-## Example Usage
-
-```
-provider "local" {}
-```
+~> **Note** Terraform primarily deals with remote resources which are able
+to outlive a single Terraform run, and so local resources can sometimes violate
+its assumptions. The resources here are best used with care, since depending
+on local state can make it hard to apply the same Terraform configuration on
+many different local systems where the local resources may not be universally
+available. See specific notes in each resource for more information.
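The note above describes behavior that lives in the resource's Read step: when the file is absent on the machine running Terraform, Read clears the ID so the next plan re-creates it. A minimal sketch of that drift check, assuming the usual helper/schema shape (illustrative, not the exact `local_file` source):

```go
package example

import (
	"io/ioutil"
	"os"

	"github.com/hashicorp/terraform/helper/schema"
)

// resourceLocalFileRead sketches why local files look "deleted" on a
// fresh machine: a missing file simply drops the resource from state.
func resourceLocalFileRead(d *schema.ResourceData, meta interface{}) error {
	path := d.Get("filename").(string)

	content, err := ioutil.ReadFile(path)
	if os.IsNotExist(err) {
		d.SetId("") // file is gone here, so plan will re-create it
		return nil
	}
	if err != nil {
		return err
	}

	if string(content) != d.Get("content").(string) {
		d.SetId("") // content drifted, also force re-creation
	}
	return nil
}
```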
diff --git a/website/source/docs/providers/local/r/file.html.md b/website/source/docs/providers/local/r/file.html.md
index f0e3c8628..83ac2a325 100644
--- a/website/source/docs/providers/local/r/file.html.md
+++ b/website/source/docs/providers/local/r/file.html.md
@@ -6,14 +6,20 @@ description: |-
   Generates a local file from content.
 ---
 
-# local\_file
+# local_file
 
-Generates a local file from a given content.
+Generates a local file with the given content.
+
+~> **Note** When working with local files, Terraform will detect the resource
+as having been deleted each time a configuration is applied on a new machine
+where the file is not present and will generate a diff to re-create it. This
+may cause "noise" in diffs in environments where configurations are routinely
+applied by many different users or within automation systems.
 
 ## Example Usage
 
-```
-data "local_file" "foo" {
+```hcl
+resource "local_file" "foo" {
   content  = "foo!"
   filename = "${path.module}/foo.bar"
 }
@@ -23,8 +29,9 @@ The following arguments are supported:
 
-* `content` - (required) The content of file to create.
+* `content` - (Required) The content of file to create.
 
-* `filename` - (required) The path of the file to create.
+* `filename` - (Required) The path of the file to create.
 
-NOTE: Any required parent folders are created automatically. Additionally, any existing file will get overwritten.
\ No newline at end of file
+Any required parent directories will be created automatically, and any existing
+file with the given name will be overwritten.

From 9810d974b03d5fd6aff1cbcb74924fe44bd4e620 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Mon, 17 Apr 2017 10:49:16 -0700
Subject: [PATCH 180/342] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index deac7d31a..1564fefcd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@ FEATURES:
 
 * **New Data Source:** `google_compute_network` [GH-12442]
 * **New Data Source:** `google_compute_subnetwork` [GH-12442]
+* **New Resource:** `local_file` for creating local files (please see the docs for caveats) [GH-12757]
 
 IMPROVEMENTS:
 * state/remote/swift: Support Openstack request logging [GH-13583]

From 55d99c05e1c75d1bedb13af237d55846913fcf49 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Mon, 17 Apr 2017 11:23:16 -0700
Subject: [PATCH 181/342] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1564fefcd..a8cd5ed5a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ IMPROVEMENTS:
  * provider/aws: Add `name_prefix` support to `aws_cloudwatch_log_group` [GH-13273]
  * provider/azurerm: VM Scale Sets - import support [GH-13464]
  * provider/google: `google_compute_address` and `google_compute_global_address` are now importable [GH-13270]
+ * provider/vault: `vault_generic_secret` resource can now optionally detect drift if it has appropriate access [GH-11776]
 
 BUG FIXES:

From a6565555cb04ca5b22c2a415f0ba64e5149badfb Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Mon, 17 Apr 2017 15:18:19 -0500
Subject: [PATCH 182/342] provider/aws: Update Lambda tests for randomness

---
 .../aws/resource_aws_lambda_function_test.go | 51 ++++++++++---------
 1 file changed, 27 insertions(+), 24 deletions(-)
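The fixed names this patch removes (tf_acc_lambda_name_local, tf_acc_lambda_name_s3) meant two overlapping runs in the same account collided on one Lambda function. Threading a random suffix through the config template keeps each run's resources disjoint; the idiom in isolation, using the helper path seen throughout this tree:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/acctest"
)

// uniqueName yields a per-run resource name so concurrent acceptance
// test runs against the same account never contend for one identifier.
func uniqueName(prefix string) string {
	return fmt.Sprintf("%s_%d", prefix, acctest.RandInt())
}
```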
diff --git a/builtin/providers/aws/resource_aws_lambda_function_test.go b/builtin/providers/aws/resource_aws_lambda_function_test.go
index ce00164c4..26bcc78e3 100644
--- a/builtin/providers/aws/resource_aws_lambda_function_test.go
+++ b/builtin/providers/aws/resource_aws_lambda_function_test.go
@@ -293,6 +293,7 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
 	defer os.Remove(path)
 
 	rInt := acctest.RandInt()
+	rName := fmt.Sprintf("tf_acc_lambda_local_%d", rInt)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:  func() { testAccPreCheck(t) },
@@ -303,11 +304,11 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
 				PreConfig: func() {
 					testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile)
 				},
-				Config: genAWSLambdaFunctionConfig_local(path, rInt),
+				Config: genAWSLambdaFunctionConfig_local(path, rInt, rName),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", "tf_acc_lambda_name_local", &conf),
-					testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_local"),
-					testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, "tf_acc_lambda_name_local"),
+					testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", rName, &conf),
+					testAccCheckAwsLambdaFunctionName(&conf, rName),
+					testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, rName),
 					testAccCheckAwsLambdaSourceCodeHash(&conf, "8DPiX+G1l2LQ8hjBkwRchQFf1TSCEvPrYGRKlM9UoyY="),
 				),
 			},
@@ -315,11 +316,11 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
 				PreConfig: func() {
 					testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile)
 				},
-				Config: genAWSLambdaFunctionConfig_local(path, rInt),
+				Config: genAWSLambdaFunctionConfig_local(path, rInt, rName),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", "tf_acc_lambda_name_local", &conf),
-					testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_local"),
-					testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, "tf_acc_lambda_name_local"),
+					testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", rName, &conf),
+					testAccCheckAwsLambdaFunctionName(&conf, rName),
+					testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, rName),
 					testAccCheckAwsLambdaSourceCodeHash(&conf, "0tdaP9H9hsk9c2CycSwOG/sa/x5JyAmSYunA/ce99Pg="),
 				),
 			},
@@ -377,7 +378,7 @@ func TestAccAWSLambdaFunction_localUpdate_nameOnly(t *testing.T) {
 	})
 }
 
-func TestAccAWSLambdaFunction_s3Update(t *testing.T) {
+func TestAccAWSLambdaFunction_s3Update_basic(t *testing.T) {
 	var conf lambda.GetFunctionOutput
 
 	path, zipFile, err := createTempFile("lambda_s3Update")
@@ -391,6 +392,8 @@
 
 	rInt := acctest.RandInt()
 
+	rName := fmt.Sprintf("tf_acc_lambda_%d", rInt)
+
 	resource.Test(t, resource.TestCase{
 		PreCheck:  func() { testAccPreCheck(t) },
 		Providers: testAccProviders,
@@ -401,11 +404,11 @@
 				// Upload 1st version
 				testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile)
 			},
-			Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
+			Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt, rName),
 			Check: resource.ComposeTestCheckFunc(
-				testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3", &conf),
-				testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3"),
-				testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, "tf_acc_lambda_name_s3"),
+				testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", rName, &conf),
+				testAccCheckAwsLambdaFunctionName(&conf, rName),
+				testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, rName),
 				testAccCheckAwsLambdaSourceCodeHash(&conf, "8DPiX+G1l2LQ8hjBkwRchQFf1TSCEvPrYGRKlM9UoyY="),
 			),
 		},
@@ -415,16 +418,16 @@
 				// Upload 2nd version
 				testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile)
 			},
-			Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
+			Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt, rName),
 		},
 		// Extra step because of missing ComputedWhen
 		// See https://github.com/hashicorp/terraform/pull/4846 & https://github.com/hashicorp/terraform/pull/5330
 		{
-			Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
+			Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt, rName),
 			Check: resource.ComposeTestCheckFunc(
-				testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3", &conf),
-				testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3"),
-				testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, "tf_acc_lambda_name_s3"),
+				testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", rName, &conf),
+				testAccCheckAwsLambdaFunctionName(&conf, rName),
+				testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, rName),
 				testAccCheckAwsLambdaSourceCodeHash(&conf, "0tdaP9H9hsk9c2CycSwOG/sa/x5JyAmSYunA/ce99Pg="),
 			),
 		},
@@ -1125,16 +1128,16 @@ EOF
 resource "aws_lambda_function" "lambda_function_local" {
     filename = "%s"
     source_code_hash = "${base64sha256(file("%s"))}"
-    function_name = "tf_acc_lambda_name_local"
+    function_name = "%s"
     role = "${aws_iam_role.iam_for_lambda.arn}"
     handler = "exports.example"
     runtime = "nodejs4.3"
 }
 `
 
-func genAWSLambdaFunctionConfig_local(filePath string, rInt int) string {
+func genAWSLambdaFunctionConfig_local(filePath string, rInt int, rName string) string {
 	return fmt.Sprintf(testAccAWSLambdaFunctionConfig_local_tpl, rInt,
-		filePath, filePath)
+		filePath, filePath, rName)
 }
 
 func genAWSLambdaFunctionConfig_local_name_only(filePath, rName string) string {
@@ -1207,16 +1210,16 @@ resource "aws_lambda_function" "lambda_function_s3" {
     s3_bucket = "${aws_s3_bucket_object.o.bucket}"
     s3_key = "${aws_s3_bucket_object.o.key}"
     s3_object_version = "${aws_s3_bucket_object.o.version_id}"
-    function_name = "tf_acc_lambda_name_s3"
+    function_name = "%s"
     role = "${aws_iam_role.iam_for_lambda.arn}"
     handler = "exports.example"
     runtime = "nodejs4.3"
 }
 `
 
-func genAWSLambdaFunctionConfig_s3(bucket, key, path string, rInt int) string {
+func genAWSLambdaFunctionConfig_s3(bucket, key, path string, rInt int, rName string) string {
 	return fmt.Sprintf(testAccAWSLambdaFunctionConfig_s3_tpl,
-		bucket, key, path, path, rInt)
+		bucket, key, path, path, rInt, rName)
 }
 
 func testAccAWSLambdaFunctionConfig_s3_unversioned_tpl(rName, bucketName, key, path string) string {

From 8939ac22563880ab679ee13c1f3e7a54488e9504 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Mon, 17 Apr 2017 16:02:01 -0500
Subject: [PATCH 183/342] provider/aws: randomize some more tests

---
 .../aws/resource_aws_ecs_service_test.go | 27 ++++++++++++-------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/builtin/providers/aws/resource_aws_ecs_service_test.go b/builtin/providers/aws/resource_aws_ecs_service_test.go
index 53d38ccbd..f622d64b7 100644
--- a/builtin/providers/aws/resource_aws_ecs_service_test.go
+++ b/builtin/providers/aws/resource_aws_ecs_service_test.go
@@ -244,13 +244,15 @@ func TestAccAWSEcsService_withEcsClusterName(t *testing.T) {
 }
 
 func TestAccAWSEcsService_withAlb(t *testing.T) {
+	rName := acctest.RandomWithPrefix("tf-acc")
+
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSEcsServiceDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccAWSEcsServiceWithAlb,
+				Config: testAccAWSEcsServiceWithAlb(rName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSEcsServiceExists("aws_ecs_service.with_alb"),
 				),
@@ -874,11 +876,15 @@ resource "aws_ecs_service" "jenkins" {
 }
 `
 
-var testAccAWSEcsServiceWithAlb = `
+func testAccAWSEcsServiceWithAlb(rName string) string {
+	return fmt.Sprintf(`
 data "aws_availability_zones" "available" {}
 
 resource "aws_vpc" "main" {
 	cidr_block = "10.10.0.0/16"
+	tags {
+		Name = "TestAccAWSEcsService_withAlb"
+	}
 }
 
 resource "aws_subnet" "main" {
@@ -889,11 +895,11 @@ resource "aws_subnet" "main" {
 }
 
 resource "aws_ecs_cluster" "main" {
-	name = "terraform_acc_test_ecs_15"
+	name = "%s"
 }
 
 resource "aws_ecs_task_definition" "with_lb_changes" {
-	family = "tf_acc_test_ghost_lbd"
+	family = "%s"
 	container_definitions = <
Date: Tue, 18 Apr 2017 10:35:25 +0100
Subject: [PATCH 184/342] Mention LOC of incoming PRs in contributors guide (#13737)

---
 .github/CONTRIBUTING.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 8b5d6a1c9..72499c521 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -201,6 +201,9 @@ Implementing a new resource is a good way to learn more about how Terraform
 interacts with upstream APIs. There are plenty of examples to draw from in the
 existing resources, but you still get to implement something completely new.
 
+ - [ ] __Minimal LOC__: It can be inefficient for both the reviewer
+   and author to go through long feedback cycles on a big PR with many
+   resources. We therefore encourage you to only submit **1 resource at a time**.
 - [ ] __Acceptance tests__: New resources should include acceptance tests
    covering their behavior. See [Writing Acceptance Tests](#writing-acceptance-tests)
    below for a detailed guide on how to
@@ -223,6 +226,11 @@ Implementing a new provider gives Terraform the ability to manage resources in
 a whole new API. It's a larger undertaking, but brings major new functionality
 into Terraform.
 
+ - [ ] __Minimal initial LOC__: Some providers may be big and it can be
+   inefficient for both reviewer & author to go through long feedback cycles
+   on a big PR with many resources. We encourage you to only submit
+   the necessary minimum in a single PR, ideally **just the first resource**
+   of the provider.
 - [ ] __Acceptance tests__: Each provider should include an acceptance test
    suite with tests for each resource, covering its behavior. See
    [Writing Acceptance Tests](#writing-acceptance-tests) below

From c2d1a37334938e250600b066760ce95d04acecde Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Tue, 18 Apr 2017 13:52:40 +0200
Subject: [PATCH 185/342] DigitalOcean: Export droplet prices (#13720)

Note: In DO, Droplets are about the only thing you pay for. DNS and
Floating IPs are free.
Signed-off-by: Julien Pivotto
---
 .../digitalocean/resource_digitalocean_droplet.go      | 12 ++++++++++++
 .../resource_digitalocean_droplet_test.go              |  8 ++++++++
 .../source/docs/providers/do/r/droplet.html.markdown   |  2 ++
 3 files changed, 22 insertions(+)

diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet.go b/builtin/providers/digitalocean/resource_digitalocean_droplet.go
index 76212d579..48372d0e7 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_droplet.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_droplet.go
@@ -63,6 +63,16 @@ func resourceDigitalOceanDroplet() *schema.Resource {
 				Computed: true,
 			},
 
+			"price_hourly": {
+				Type:     schema.TypeFloat,
+				Computed: true,
+			},
+
+			"price_monthly": {
+				Type:     schema.TypeFloat,
+				Computed: true,
+			},
+
 			"resize_disk": {
 				Type:     schema.TypeBool,
 				Optional: true,
@@ -259,6 +269,8 @@ func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) e
 	d.Set("name", droplet.Name)
 	d.Set("region", droplet.Region.Slug)
 	d.Set("size", droplet.Size.Slug)
+	d.Set("price_hourly", droplet.Size.PriceHourly)
+	d.Set("price_monthly", droplet.Size.PriceMonthly)
 	d.Set("disk", droplet.Disk)
 	d.Set("vcpus", droplet.Vcpus)
 	d.Set("status", droplet.Status)
diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go
index 1c3d5601c..90bece0e3 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go
@@ -333,6 +333,14 @@ func testAccCheckDigitalOceanDropletAttributes(droplet *godo.Droplet) resource.T
 			return fmt.Errorf("Bad size_slug: %s", droplet.Size.Slug)
 		}
 
+		if droplet.Size.PriceHourly != 0.00744 {
+			return fmt.Errorf("Bad price_hourly: %v", droplet.Size.PriceHourly)
+		}
+
+		if droplet.Size.PriceMonthly != 5.0 {
+			return fmt.Errorf("Bad price_monthly: %v", droplet.Size.PriceMonthly)
+		}
+
 		if droplet.Region.Slug != "nyc3" {
 			return fmt.Errorf("Bad region_slug: %s", droplet.Region.Slug)
 		}
diff --git a/website/source/docs/providers/do/r/droplet.html.markdown b/website/source/docs/providers/do/r/droplet.html.markdown
index f2fd16c6c..3b15b2e4e 100644
--- a/website/source/docs/providers/do/r/droplet.html.markdown
+++ b/website/source/docs/providers/do/r/droplet.html.markdown
@@ -65,6 +65,8 @@ The following attributes are exported:
 * `ipv4_address_private` - The private networking IPv4 address
 * `locked` - Is the Droplet locked
 * `private_networking` - Is private networking enabled
+* `price_hourly` - Droplet hourly price
+* `price_monthly` - Droplet monthly price
 * `size` - The instance size
 * `disk` - The size of the instance's disk in GB
 * `vcpus` - The number of the instance's virtual CPUs

From 16c0594c475d9e75bf5de51fe906d32542410fcf Mon Sep 17 00:00:00 2001
From: Paul Stack
Date: Tue, 18 Apr 2017 14:53:12 +0300
Subject: [PATCH 186/342] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a8cd5ed5a..858382729 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@ IMPROVEMENTS:
importable [GH-13270] * provider/vault: `vault_generic_secret` resource can now optionally detect drift if it has appropriate access [GH-11776] From 69ec7e12ac6787c01ea78a65faec29e1720d894c Mon Sep 17 00:00:00 2001 From: demonwy Date: Tue, 18 Apr 2017 20:18:57 +0800 Subject: [PATCH 187/342] provider/alicloud: Add new resource ESS, including scalinggroup scalingconfiguration scalingrule and schedule (#13731) * add docs * add new resource ess * add examples * update aliyun go * merge master * fix ci --- builtin/providers/alicloud/common.go | 39 +- builtin/providers/alicloud/config.go | 13 + builtin/providers/alicloud/errors.go | 18 + builtin/providers/alicloud/provider.go | 22 +- .../alicloud/resource_alicloud_db_instance.go | 7 +- .../resource_alicloud_db_instance_test.go | 2 +- .../alicloud/resource_alicloud_eip.go | 15 +- .../resource_alicloud_eip_association.go | 2 +- ...ource_alicloud_ess_scalingconfiguration.go | 320 +++++++++++ ..._alicloud_ess_scalingconfiguration_test.go | 495 ++++++++++++++++++ .../resource_alicloud_ess_scalinggroup.go | 209 ++++++++ ...resource_alicloud_ess_scalinggroup_test.go | 297 +++++++++++ .../resource_alicloud_ess_scalingrule.go | 168 ++++++ .../resource_alicloud_ess_scalingrule_test.go | 290 ++++++++++ .../resource_alicloud_ess_schedule.go | 220 ++++++++ .../resource_alicloud_ess_schedule_test.go | 151 ++++++ .../alicloud/resource_alicloud_forward.go | 165 ++++++ .../resource_alicloud_forward_test.go | 216 ++++++++ .../alicloud/resource_alicloud_instance.go | 81 +-- .../alicloud/resource_alicloud_nat_gateway.go | 93 +++- .../resource_alicloud_nat_gateway_test.go | 42 +- .../resource_alicloud_security_group.go | 5 + .../resource_alicloud_security_group_rule.go | 31 +- .../alicloud/resource_alicloud_slb.go | 5 + .../resource_alicloud_slb_attachment.go | 8 +- .../alicloud/resource_alicloud_snat.go | 134 +++++ .../alicloud/resource_alicloud_snat_test.go | 180 +++++++ .../alicloud/resource_alicloud_vpc.go | 4 +- .../alicloud/resource_alicloud_vswitch.go | 4 +- .../alicloud/service_alicloud_ecs.go | 4 +- .../alicloud/service_alicloud_ess.go | 167 ++++++ .../alicloud/service_alicloud_rds.go | 28 +- .../alicloud/service_alicloud_vpc.go | 75 ++- .../alicloud/struct_security_groups.go | 11 - builtin/providers/alicloud/validators.go | 21 +- builtin/providers/alicloud/validators_test.go | 14 +- examples/alicloud-ess-scaling/README.md | 17 + examples/alicloud-ess-scaling/main.tf | 38 ++ examples/alicloud-ess-scaling/outputs.tf | 7 + examples/alicloud-ess-scaling/variables.tf | 24 + examples/alicloud-ess-schedule/README.md | 17 + examples/alicloud-ess-schedule/main.tf | 51 ++ examples/alicloud-ess-schedule/outputs.tf | 11 + examples/alicloud-ess-schedule/variables.tf | 32 ++ examples/alicloud-vpc-snat/main.tf | 87 +++ examples/alicloud-vpc-snat/ouputs.tf | 7 + examples/alicloud-vpc-snat/variables.tf | 22 + .../denverdino/aliyungo/common/client.go | 77 +++ .../denverdino/aliyungo/common/endpoints.xml | 68 ++- .../denverdino/aliyungo/ecs/forward_entry.go | 104 ++++ .../denverdino/aliyungo/ecs/vpcs.go | 1 + .../denverdino/aliyungo/ecs/vswitches.go | 1 + .../denverdino/aliyungo/ess/client.go | 48 ++ .../denverdino/aliyungo/ess/configuration.go | 127 +++++ .../denverdino/aliyungo/ess/group.go | 242 +++++++++ .../denverdino/aliyungo/ess/rule.go | 130 +++++ .../denverdino/aliyungo/ess/schedule.go | 140 +++++ .../denverdino/aliyungo/util/encoding.go | 162 ++++++ vendor/vendor.json | 32 +- .../alicloud/r/db_instance.html.markdown | 27 +- 
.../providers/alicloud/r/disk.html.markdown | 12 +- .../alicloud/r/disk_attachment.html.markdown | 8 +- .../providers/alicloud/r/eip.html.markdown | 6 +- .../alicloud/r/eip_association.html.markdown | 8 +- .../r/ess_scaling_configuration.html.markdown | 84 +++ .../r/ess_scaling_group.html.markdown | 57 ++ .../alicloud/r/ess_scaling_rule.html.markdown | 59 +++ .../alicloud/r/ess_schedule.html.markdown | 65 +++ .../alicloud/r/forward.html.markdown | 68 +++ .../alicloud/r/instance.html.markdown | 21 +- .../alicloud/r/nat_gateway.html.markdown | 23 +- .../alicloud/r/security_group.html.markdown | 11 +- .../r/security_group_rule.html.markdown | 6 +- .../providers/alicloud/r/slb.html.markdown | 61 ++- .../alicloud/r/slb_attachment.html.markdown | 8 +- .../providers/alicloud/r/snat.html.markdown | 61 +++ .../providers/alicloud/r/vpc.html.markdown | 3 +- .../alicloud/r/vroute_entry.html.markdown | 4 +- .../alicloud/r/vswitch.html.markdown | 4 +- website/source/layouts/alicloud.erb | 181 ++++--- 80 files changed, 5475 insertions(+), 303 deletions(-) create mode 100644 builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration.go create mode 100644 builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration_test.go create mode 100644 builtin/providers/alicloud/resource_alicloud_ess_scalinggroup.go create mode 100644 builtin/providers/alicloud/resource_alicloud_ess_scalinggroup_test.go create mode 100644 builtin/providers/alicloud/resource_alicloud_ess_scalingrule.go create mode 100644 builtin/providers/alicloud/resource_alicloud_ess_scalingrule_test.go create mode 100644 builtin/providers/alicloud/resource_alicloud_ess_schedule.go create mode 100644 builtin/providers/alicloud/resource_alicloud_ess_schedule_test.go create mode 100644 builtin/providers/alicloud/resource_alicloud_forward.go create mode 100644 builtin/providers/alicloud/resource_alicloud_forward_test.go create mode 100644 builtin/providers/alicloud/resource_alicloud_snat.go create mode 100644 builtin/providers/alicloud/resource_alicloud_snat_test.go create mode 100644 builtin/providers/alicloud/service_alicloud_ess.go delete mode 100644 builtin/providers/alicloud/struct_security_groups.go create mode 100644 examples/alicloud-ess-scaling/README.md create mode 100644 examples/alicloud-ess-scaling/main.tf create mode 100644 examples/alicloud-ess-scaling/outputs.tf create mode 100644 examples/alicloud-ess-scaling/variables.tf create mode 100644 examples/alicloud-ess-schedule/README.md create mode 100644 examples/alicloud-ess-schedule/main.tf create mode 100644 examples/alicloud-ess-schedule/outputs.tf create mode 100644 examples/alicloud-ess-schedule/variables.tf create mode 100644 examples/alicloud-vpc-snat/main.tf create mode 100644 examples/alicloud-vpc-snat/ouputs.tf create mode 100644 examples/alicloud-vpc-snat/variables.tf create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go create mode 100644 vendor/github.com/denverdino/aliyungo/ess/client.go create mode 100644 vendor/github.com/denverdino/aliyungo/ess/configuration.go create mode 100644 vendor/github.com/denverdino/aliyungo/ess/group.go create mode 100644 vendor/github.com/denverdino/aliyungo/ess/rule.go create mode 100644 vendor/github.com/denverdino/aliyungo/ess/schedule.go create mode 100644 website/source/docs/providers/alicloud/r/ess_scaling_configuration.html.markdown create mode 100644 website/source/docs/providers/alicloud/r/ess_scaling_group.html.markdown create mode 100644 
website/source/docs/providers/alicloud/r/ess_scaling_rule.html.markdown create mode 100644 website/source/docs/providers/alicloud/r/ess_schedule.html.markdown create mode 100644 website/source/docs/providers/alicloud/r/forward.html.markdown create mode 100644 website/source/docs/providers/alicloud/r/snat.html.markdown diff --git a/builtin/providers/alicloud/common.go b/builtin/providers/alicloud/common.go index c2af2a683..e9bb1a9f8 100644 --- a/builtin/providers/alicloud/common.go +++ b/builtin/providers/alicloud/common.go @@ -17,38 +17,39 @@ const ( const defaultTimeout = 120 // timeout for long time progerss product, rds e.g. -const defaultLongTimeout = 800 +const defaultLongTimeout = 1000 func getRegion(d *schema.ResourceData, meta interface{}) common.Region { return meta.(*AliyunClient).Region } func notFoundError(err error) bool { - if e, ok := err.(*common.Error); ok && (e.StatusCode == 404 || e.ErrorResponse.Message == "Not found") { + if e, ok := err.(*common.Error); ok && + (e.StatusCode == 404 || e.ErrorResponse.Message == "Not found" || e.Code == InstanceNotfound) { return true } return false } -// Protocal represents network protocal -type Protocal string +// Protocol represents network protocol +type Protocol string -// Constants of protocal definition +// Constants of protocol definition const ( - Http = Protocal("http") - Https = Protocal("https") - Tcp = Protocal("tcp") - Udp = Protocal("udp") + Http = Protocol("http") + Https = Protocol("https") + Tcp = Protocol("tcp") + Udp = Protocol("udp") ) -// ValidProtocals network protocal list -var ValidProtocals = []Protocal{Http, Https, Tcp, Udp} +// ValidProtocols network protocol list +var ValidProtocols = []Protocol{Http, Https, Tcp, Udp} // simple array value check method, support string type only -func isProtocalValid(value string) bool { +func isProtocolValid(value string) bool { res := false - for _, v := range ValidProtocals { + for _, v := range ValidProtocols { if string(v) == value { res = true } @@ -77,4 +78,16 @@ const DB_DEFAULT_CONNECT_PORT = "3306" const COMMA_SEPARATED = "," +const COLON_SEPARATED = ":" + const LOCAL_HOST_IP = "127.0.0.1" + +// Takes the result of flatmap.Expand for an array of strings +// and returns a []string +func expandStringList(configured []interface{}) []string { + vs := make([]string, 0, len(configured)) + for _, v := range configured { + vs = append(vs, v.(string)) + } + return vs +} diff --git a/builtin/providers/alicloud/config.go b/builtin/providers/alicloud/config.go index e17003bb2..f84c7e02a 100644 --- a/builtin/providers/alicloud/config.go +++ b/builtin/providers/alicloud/config.go @@ -5,6 +5,7 @@ import ( "github.com/denverdino/aliyungo/common" "github.com/denverdino/aliyungo/ecs" + "github.com/denverdino/aliyungo/ess" "github.com/denverdino/aliyungo/rds" "github.com/denverdino/aliyungo/slb" ) @@ -20,6 +21,7 @@ type Config struct { type AliyunClient struct { Region common.Region ecsconn *ecs.Client + essconn *ess.Client rdsconn *rds.Client // use new version ecsNewconn *ecs.Client @@ -60,6 +62,11 @@ func (c *Config) Client() (*AliyunClient, error) { return nil, err } + essconn, err := c.essConn() + if err != nil { + return nil, err + } + return &AliyunClient{ Region: c.Region, ecsconn: ecsconn, @@ -67,6 +74,7 @@ func (c *Config) Client() (*AliyunClient, error) { vpcconn: vpcconn, slbconn: slbconn, rdsconn: rdsconn, + essconn: essconn, }, nil } @@ -123,3 +131,8 @@ func (c *Config) vpcConn() (*ecs.Client, error) { return client, nil } +func (c *Config) essConn() (*ess.Client, 
diff --git a/builtin/providers/alicloud/config.go b/builtin/providers/alicloud/config.go
index e17003bb2..f84c7e02a 100644
--- a/builtin/providers/alicloud/config.go
+++ b/builtin/providers/alicloud/config.go
@@ -5,6 +5,7 @@ import (
 
 	"github.com/denverdino/aliyungo/common"
 	"github.com/denverdino/aliyungo/ecs"
+	"github.com/denverdino/aliyungo/ess"
 	"github.com/denverdino/aliyungo/rds"
 	"github.com/denverdino/aliyungo/slb"
 )
@@ -20,6 +21,7 @@ type Config struct {
 type AliyunClient struct {
 	Region  common.Region
 	ecsconn *ecs.Client
+	essconn *ess.Client
 	rdsconn *rds.Client
 	// use new version
 	ecsNewconn *ecs.Client
@@ -60,6 +62,11 @@ func (c *Config) Client() (*AliyunClient, error) {
 		return nil, err
 	}
 
+	essconn, err := c.essConn()
+	if err != nil {
+		return nil, err
+	}
+
 	return &AliyunClient{
 		Region:  c.Region,
 		ecsconn: ecsconn,
@@ -67,6 +74,7 @@ func (c *Config) Client() (*AliyunClient, error) {
 		vpcconn: vpcconn,
 		slbconn: slbconn,
 		rdsconn: rdsconn,
+		essconn: essconn,
 	}, nil
 }
 
@@ -123,3 +131,8 @@ func (c *Config) vpcConn() (*ecs.Client, error) {
 	return client, nil
 }
 
+func (c *Config) essConn() (*ess.Client, error) {
+	client := ess.NewESSClient(c.AccessKey, c.SecretKey, c.Region)
+	client.SetBusinessInfo(BusinessInfoKey)
+	return client, nil
+}
diff --git a/builtin/providers/alicloud/errors.go b/builtin/providers/alicloud/errors.go
index 338525330..06d29642b 100644
--- a/builtin/providers/alicloud/errors.go
+++ b/builtin/providers/alicloud/errors.go
@@ -1,5 +1,7 @@
 package alicloud
 
+import "github.com/denverdino/aliyungo/common"
+
 const (
 	// common
 	Notfound = "Not found"
@@ -25,7 +27,23 @@ const (
 	//Nat gateway
 	NatGatewayInvalidRegionId            = "Invalid.RegionId"
 	DependencyViolationBandwidthPackages = "DependencyViolation.BandwidthPackages"
+	NotFindSnatEntryBySnatId             = "NotFindSnatEntryBySnatId"
+	NotFindForwardEntryByForwardId       = "NotFindForwardEntryByForwardId"
 
 	// vswitch
 	VswitcInvalidRegionId = "InvalidRegionId.NotFound"
+
+	// ess
+	InvalidScalingGroupIdNotFound               = "InvalidScalingGroupId.NotFound"
+	IncorrectScalingConfigurationLifecycleState = "IncorrectScalingConfigurationLifecycleState"
 )
+
+func GetNotFoundErrorFromString(str string) error {
+	return &common.Error{
+		ErrorResponse: common.ErrorResponse{
+			Code:    InstanceNotfound,
+			Message: str,
+		},
+		StatusCode: -1,
+	}
+}
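GetNotFoundErrorFromString pairs with the widened notFoundError check in common.go: a service wrapper can synthesize a not-found condition from a bare message, and callers then treat it exactly like an API 404. A compile-level sketch of the round trip; the constant value here is illustrative only, the real code uses the provider's InstanceNotfound constant:

```go
package example

import "github.com/denverdino/aliyungo/common"

const instanceNotFound = "Instance.NotFound" // illustrative stand-in value

// newNotFound mirrors GetNotFoundErrorFromString above: a synthetic
// *common.Error carrying the provider's own not-found code.
func newNotFound(msg string) error {
	return &common.Error{
		ErrorResponse: common.ErrorResponse{Code: instanceNotFound, Message: msg},
		StatusCode:    -1,
	}
}

// isNotFound mirrors the widened notFoundError check: a real HTTP 404
// and a synthesized not-found code are handled identically.
func isNotFound(err error) bool {
	e, ok := err.(*common.Error)
	return ok && (e.StatusCode == 404 || e.Code == instanceNotFound)
}
```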
"alicloud_subnet": resourceAliyunSubnet(), "alicloud_vswitch": resourceAliyunSubnet(), "alicloud_route_entry": resourceAliyunRouteEntry(), + "alicloud_snat_entry": resourceAliyunSnatEntry(), + "alicloud_forward_entry": resourceAliyunForwardEntry(), "alicloud_eip": resourceAliyunEip(), "alicloud_eip_association": resourceAliyunEipAssociation(), "alicloud_slb": resourceAliyunSlb(), diff --git a/builtin/providers/alicloud/resource_alicloud_db_instance.go b/builtin/providers/alicloud/resource_alicloud_db_instance.go index c19aef165..062b5d0e1 100644 --- a/builtin/providers/alicloud/resource_alicloud_db_instance.go +++ b/builtin/providers/alicloud/resource_alicloud_db_instance.go @@ -218,7 +218,7 @@ func resourceAlicloudDBInstanceCreate(d *schema.ResourceData, meta interface{}) // wait instance status change from Creating to running if err := conn.WaitForInstance(d.Id(), rds.Running, defaultLongTimeout); err != nil { - log.Printf("[DEBUG] WaitForInstance %s got error: %#v", rds.Running, err) + return fmt.Errorf("WaitForInstance %s got error: %#v", rds.Running, err) } if err := modifySecurityIps(d.Id(), d.Get("security_ips"), meta); err != nil { @@ -386,6 +386,11 @@ func resourceAlicloudDBInstanceRead(d *schema.ResourceData, meta interface{}) er if err != nil { return err } + if resp.Databases.Database == nil { + d.SetId("") + return nil + } + d.Set("db_mappings", flattenDatabaseMappings(resp.Databases.Database)) argn := rds.DescribeDBInstanceNetInfoArgs{ diff --git a/builtin/providers/alicloud/resource_alicloud_db_instance_test.go b/builtin/providers/alicloud/resource_alicloud_db_instance_test.go index 8348e5089..498cc3ae1 100644 --- a/builtin/providers/alicloud/resource_alicloud_db_instance_test.go +++ b/builtin/providers/alicloud/resource_alicloud_db_instance_test.go @@ -535,7 +535,7 @@ func testAccCheckDBInstanceDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*AliyunClient) for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_db_instance.foo" { + if rs.Type != "alicloud_db_instance" { continue } diff --git a/builtin/providers/alicloud/resource_alicloud_eip.go b/builtin/providers/alicloud/resource_alicloud_eip.go index 8a0329eb8..f1c9621a8 100644 --- a/builtin/providers/alicloud/resource_alicloud_eip.go +++ b/builtin/providers/alicloud/resource_alicloud_eip.go @@ -78,7 +78,14 @@ func resourceAliyunEipRead(d *schema.ResourceData, meta interface{}) error { d.SetId("") return nil } - return err + return fmt.Errorf("Error Describe Eip Attribute: %#v", err) + } + + if eip.InstanceId != "" { + d.Set("instance", eip.InstanceId) + } else { + d.Set("instance", "") + return nil } bandwidth, _ := strconv.Atoi(eip.Bandwidth) @@ -87,12 +94,6 @@ func resourceAliyunEipRead(d *schema.ResourceData, meta interface{}) error { d.Set("ip_address", eip.IpAddress) d.Set("status", eip.Status) - if eip.InstanceId != "" { - d.Set("instance", eip.InstanceId) - } else { - d.Set("instance", "") - } - return nil } diff --git a/builtin/providers/alicloud/resource_alicloud_eip_association.go b/builtin/providers/alicloud/resource_alicloud_eip_association.go index a9d419ce1..5f492b40b 100644 --- a/builtin/providers/alicloud/resource_alicloud_eip_association.go +++ b/builtin/providers/alicloud/resource_alicloud_eip_association.go @@ -66,7 +66,7 @@ func resourceAliyunEipAssociationRead(d *schema.ResourceData, meta interface{}) d.SetId("") return nil } - return err + return fmt.Errorf("Error Describe Eip Attribute: %#v", err) } if eip.InstanceId != instanceId { diff --git 
a/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration.go b/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration.go new file mode 100644 index 000000000..3a8d94380 --- /dev/null +++ b/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration.go @@ -0,0 +1,320 @@ +package alicloud + +import ( + "fmt" + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/ecs" + "github.com/denverdino/aliyungo/ess" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "strings" + "time" +) + +func resourceAlicloudEssScalingConfiguration() *schema.Resource { + return &schema.Resource{ + Create: resourceAliyunEssScalingConfigurationCreate, + Read: resourceAliyunEssScalingConfigurationRead, + Update: resourceAliyunEssScalingConfigurationUpdate, + Delete: resourceAliyunEssScalingConfigurationDelete, + + Schema: map[string]*schema.Schema{ + "active": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "enable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "scaling_group_id": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "image_id": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "instance_type": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "io_optimized": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateIoOptimized, + }, + "security_group_id": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "scaling_configuration_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "internet_charge_type": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + ValidateFunc: validateInternetChargeType, + }, + "internet_max_bandwidth_in": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + "internet_max_bandwidth_out": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validateInternetMaxBandWidthOut, + }, + "system_disk_category": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: validateAllowedStringValue([]string{ + string(ecs.DiskCategoryCloud), + string(ecs.DiskCategoryCloudSSD), + string(ecs.DiskCategoryCloudEfficiency), + string(ecs.DiskCategoryEphemeralSSD), + }), + }, + "data_disk": &schema.Schema{ + Optional: true, + ForceNew: true, + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "category": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "snapshot_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "device": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "instance_ids": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + MaxItems: 20, + }, + }, + } +} + +func resourceAliyunEssScalingConfigurationCreate(d *schema.ResourceData, meta interface{}) error { + + args, err := buildAlicloudEssScalingConfigurationArgs(d, meta) + if err != nil { + return err + } + + essconn := meta.(*AliyunClient).essconn + + scaling, err := essconn.CreateScalingConfiguration(args) + if err != nil { 
+		return err
+	}
+
+	d.SetId(d.Get("scaling_group_id").(string) + COLON_SEPARATED + scaling.ScalingConfigurationId)
+
+	return resourceAliyunEssScalingConfigurationUpdate(d, meta)
+}
+
+func resourceAliyunEssScalingConfigurationUpdate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*AliyunClient)
+	if d.HasChange("active") {
+		active := d.Get("active").(bool)
+		if !active {
+			return fmt.Errorf("The scaling configuration can't be deactivated directly - please activate another configuration instead.")
+		}
+		ids := strings.Split(d.Id(), COLON_SEPARATED)
+		err := client.ActiveScalingConfigurationById(ids[0], ids[1])
+
+		if err != nil {
+			return fmt.Errorf("Activate scaling configuration %s err: %#v", ids[1], err)
+		}
+	}
+
+	if err := enableEssScalingConfiguration(d, meta); err != nil {
+		return err
+	}
+
+	return resourceAliyunEssScalingConfigurationRead(d, meta)
+}
+
+func enableEssScalingConfiguration(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*AliyunClient)
+	ids := strings.Split(d.Id(), COLON_SEPARATED)
+
+	if d.HasChange("enable") {
+		d.SetPartial("enable")
+		enable := d.Get("enable").(bool)
+		if !enable {
+			err := client.DisableScalingConfigurationById(ids[0])
+
+			if err != nil {
+				return fmt.Errorf("Disable scaling group %s err: %#v", ids[0], err)
+			}
+		}
+
+		instance_ids := []string{}
+		if d.HasChange("instance_ids") {
+			d.SetPartial("instance_ids")
+			instances := d.Get("instance_ids").([]interface{})
+			instance_ids = expandStringList(instances)
+		}
+		err := client.EnableScalingConfigurationById(ids[0], ids[1], instance_ids)
+
+		if err != nil {
+			return fmt.Errorf("Enable scaling configuration %s err: %#v", ids[1], err)
+		}
+	}
+	return nil
+}
+
+func resourceAliyunEssScalingConfigurationRead(d *schema.ResourceData, meta interface{}) error {
+
+	client := meta.(*AliyunClient)
+	ids := strings.Split(d.Id(), COLON_SEPARATED)
+	c, err := client.DescribeScalingConfigurationById(ids[0], ids[1])
+	if err != nil {
+		if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error Describe ESS scaling configuration Attribute: %#v", err)
+	}
+
+	d.Set("scaling_group_id", c.ScalingGroupId)
+	d.Set("active", c.LifecycleState == ess.Active)
+	d.Set("image_id", c.ImageId)
+	d.Set("instance_type", c.InstanceType)
+	d.Set("io_optimized", c.IoOptimized)
+	d.Set("security_group_id", c.SecurityGroupId)
+	d.Set("scaling_configuration_name", c.ScalingConfigurationName)
+	d.Set("internet_charge_type", c.InternetChargeType)
+	d.Set("internet_max_bandwidth_in", c.InternetMaxBandwidthIn)
+	d.Set("internet_max_bandwidth_out", c.InternetMaxBandwidthOut)
+	d.Set("system_disk_category", c.SystemDiskCategory)
+	d.Set("data_disk", flattenDataDiskMappings(c.DataDisks.DataDisk))
+
+	return nil
+}
+
+func resourceAliyunEssScalingConfigurationDelete(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*AliyunClient)
+
+	return resource.Retry(5*time.Minute, func() *resource.RetryError {
+		ids := strings.Split(d.Id(), COLON_SEPARATED)
+		err := client.DeleteScalingConfigurationById(ids[0], ids[1])
+
+		if err != nil {
+			e, _ := err.(*common.Error)
+			if e.ErrorResponse.Code == IncorrectScalingConfigurationLifecycleState {
+				return resource.NonRetryableError(
+					fmt.Errorf("Scaling configuration is active - please activate another one and try again."))
+			}
+			if e.ErrorResponse.Code != InvalidScalingGroupIdNotFound {
+				return resource.RetryableError(
+					fmt.Errorf("Scaling configuration in use - trying again while it is deleted."))
+			}
+		}
+
+		_, err = 
client.DescribeScalingConfigurationById(ids[0], ids[1]) + if err != nil { + if notFoundError(err) { + return nil + } + return resource.NonRetryableError(err) + } + + return resource.RetryableError( + fmt.Errorf("Scaling configuration in use - trying again while it is deleted.")) + }) +} + +func buildAlicloudEssScalingConfigurationArgs(d *schema.ResourceData, meta interface{}) (*ess.CreateScalingConfigurationArgs, error) { + args := &ess.CreateScalingConfigurationArgs{ + ScalingGroupId: d.Get("scaling_group_id").(string), + ImageId: d.Get("image_id").(string), + InstanceType: d.Get("instance_type").(string), + IoOptimized: ecs.IoOptimized(d.Get("io_optimized").(string)), + SecurityGroupId: d.Get("security_group_id").(string), + } + + if v := d.Get("scaling_configuration_name").(string); v != "" { + args.ScalingConfigurationName = v + } + + if v := d.Get("internet_charge_type").(string); v != "" { + args.InternetChargeType = common.InternetChargeType(v) + } + + if v := d.Get("internet_max_bandwidth_in").(int); v != 0 { + args.InternetMaxBandwidthIn = v + } + + if v := d.Get("internet_max_bandwidth_out").(int); v != 0 { + args.InternetMaxBandwidthOut = v + } + + if v := d.Get("system_disk_category").(string); v != "" { + args.SystemDisk_Category = common.UnderlineString(v) + } + + dds, ok := d.GetOk("data_disk") + if ok { + disks := dds.([]interface{}) + diskTypes := []ess.DataDiskType{} + + for _, e := range disks { + pack := e.(map[string]interface{}) + disk := ess.DataDiskType{ + Size: pack["size"].(int), + Category: pack["category"].(string), + SnapshotId: pack["snapshot_id"].(string), + Device: pack["device"].(string), + } + if v := pack["size"].(int); v != 0 { + disk.Size = v + } + if v := pack["category"].(string); v != "" { + disk.Category = v + } + if v := pack["snapshot_id"].(string); v != "" { + disk.SnapshotId = v + } + if v := pack["device"].(string); v != "" { + disk.Device = v + } + diskTypes = append(diskTypes, disk) + } + args.DataDisk = diskTypes + } + + return args, nil +} diff --git a/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration_test.go b/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration_test.go new file mode 100644 index 000000000..4a2269b38 --- /dev/null +++ b/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration_test.go @@ -0,0 +1,495 @@ +package alicloud + +import ( + "fmt" + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/ess" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "log" + "regexp" + "strings" + "testing" +) + +func TestAccAlicloudEssScalingConfiguration_basic(t *testing.T) { + var sc ess.ScalingConfigurationItemType + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + + // module name + IDRefreshName: "alicloud_ess_scaling_configuration.foo", + + Providers: testAccProviders, + CheckDestroy: testAccCheckEssScalingConfigurationDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccEssScalingConfigurationConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingConfigurationExists( + "alicloud_ess_scaling_configuration.foo", &sc), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_configuration.foo", + "instance_type", + "ecs.s2.large"), + resource.TestMatchResourceAttr( + "alicloud_ess_scaling_configuration.foo", + "image_id", + regexp.MustCompile("^centos_6")), + ), + }, + }, + }) +} + +func TestAccAlicloudEssScalingConfiguration_multiConfig(t 
*testing.T) { + var sc ess.ScalingConfigurationItemType + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + + // module name + IDRefreshName: "alicloud_ess_scaling_configuration.bar", + + Providers: testAccProviders, + CheckDestroy: testAccCheckEssScalingConfigurationDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccEssScalingConfiguration_multiConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingConfigurationExists( + "alicloud_ess_scaling_configuration.bar", &sc), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_configuration.bar", + "active", + "false"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_configuration.bar", + "instance_type", + "ecs.s2.large"), + resource.TestMatchResourceAttr( + "alicloud_ess_scaling_configuration.bar", + "image_id", + regexp.MustCompile("^centos_6")), + ), + }, + }, + }) +} + +func SkipTestAccAlicloudEssScalingConfiguration_active(t *testing.T) { + var sc ess.ScalingConfigurationItemType + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + + // module name + IDRefreshName: "alicloud_ess_scaling_configuration.bar", + + Providers: testAccProviders, + CheckDestroy: testAccCheckEssScalingConfigurationDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccEssScalingConfiguration_active, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingConfigurationExists( + "alicloud_ess_scaling_configuration.bar", &sc), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_configuration.bar", + "active", + "true"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_configuration.bar", + "instance_type", + "ecs.s2.large"), + resource.TestMatchResourceAttr( + "alicloud_ess_scaling_configuration.bar", + "image_id", + regexp.MustCompile("^centos_6")), + ), + }, + + resource.TestStep{ + Config: testAccEssScalingConfiguration_inActive, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingConfigurationExists( + "alicloud_ess_scaling_configuration.bar", &sc), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_configuration.bar", + "active", + "false"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_configuration.bar", + "instance_type", + "ecs.s2.large"), + resource.TestMatchResourceAttr( + "alicloud_ess_scaling_configuration.bar", + "image_id", + regexp.MustCompile("^centos_6")), + ), + }, + }, + }) +} + +func SkipTestAccAlicloudEssScalingConfiguration_enable(t *testing.T) { + var sc ess.ScalingConfigurationItemType + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + + // module name + IDRefreshName: "alicloud_ess_scaling_configuration.foo", + + Providers: testAccProviders, + CheckDestroy: testAccCheckEssScalingConfigurationDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccEssScalingConfiguration_enable, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingConfigurationExists( + "alicloud_ess_scaling_configuration.foo", &sc), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_configuration.foo", + "enable", + "true"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_configuration.foo", + "instance_type", + "ecs.s2.large"), + resource.TestMatchResourceAttr( + "alicloud_ess_scaling_configuration.foo", + "image_id", + regexp.MustCompile("^centos_6")), + ), + }, + + resource.TestStep{ + Config: testAccEssScalingConfiguration_disable, + Check: resource.ComposeTestCheckFunc( + 
testAccCheckEssScalingConfigurationExists( + "alicloud_ess_scaling_configuration.foo", &sc), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_configuration.foo", + "enable", + "false"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_configuration.foo", + "instance_type", + "ecs.s2.large"), + resource.TestMatchResourceAttr( + "alicloud_ess_scaling_configuration.foo", + "image_id", + regexp.MustCompile("^centos_6")), + ), + }, + }, + }) +} + +func testAccCheckEssScalingConfigurationExists(n string, d *ess.ScalingConfigurationItemType) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ESS Scaling Configuration ID is set") + } + + client := testAccProvider.Meta().(*AliyunClient) + ids := strings.Split(rs.Primary.ID, COLON_SEPARATED) + attr, err := client.DescribeScalingConfigurationById(ids[0], ids[1]) + log.Printf("[DEBUG] check scaling configuration %s attribute %#v", rs.Primary.ID, attr) + + if err != nil { + return err + } + + if attr == nil { + return fmt.Errorf("Scaling Configuration not found") + } + + *d = *attr + return nil + } +} + +func testAccCheckEssScalingConfigurationDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*AliyunClient) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "alicloud_ess_scaling_configuration" { + continue + } + ids := strings.Split(rs.Primary.ID, COLON_SEPARATED) + ins, err := client.DescribeScalingConfigurationById(ids[0], ids[1]) + + if ins != nil { + return fmt.Errorf("Error ESS scaling configuration still exist") + } + + // Verify the error is what we want + if err != nil { + // Verify the error is what we want + e, _ := err.(*common.Error) + if e.ErrorResponse.Code == InstanceNotfound { + continue + } + return err + } + } + + return nil +} + +const testAccEssScalingConfigurationConfig = ` +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "tf_test_foo" { + description = "foo" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.tf_test_foo.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_ess_scaling_group" "foo" { + min_size = 1 + max_size = 1 + scaling_group_name = "foo" + removal_policies = ["OldestInstance", "NewestInstance"] +} + +resource "alicloud_ess_scaling_configuration" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "ecs.s2.large" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} +` + +const testAccEssScalingConfiguration_multiConfig = ` +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "tf_test_foo" { + description = "foo" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.tf_test_foo.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_ess_scaling_group" "foo" { + min_size = 1 + max_size = 1 + scaling_group_name = "foo" + removal_policies = 
["OldestInstance", "NewestInstance"] +} + +resource "alicloud_ess_scaling_configuration" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "ecs.s2.large" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} + +resource "alicloud_ess_scaling_configuration" "bar" { + scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "ecs.s2.large" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} +` + +const testAccEssScalingConfiguration_active = ` +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "tf_test_foo" { + description = "foo" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.tf_test_foo.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_ess_scaling_group" "foo" { + min_size = 1 + max_size = 1 + scaling_group_name = "foo" + removal_policies = ["OldestInstance", "NewestInstance"] +} + +resource "alicloud_ess_scaling_configuration" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" + active = true + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "ecs.s2.large" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} +` + +const testAccEssScalingConfiguration_inActive = ` +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "tf_test_foo" { + description = "foo" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.tf_test_foo.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_ess_scaling_group" "foo" { + min_size = 1 + max_size = 1 + scaling_group_name = "foo" + removal_policies = ["OldestInstance", "NewestInstance"] +} + +resource "alicloud_ess_scaling_configuration" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" + active = false + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "ecs.s2.large" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} +` + +const testAccEssScalingConfiguration_enable = ` +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "tf_test_foo" { + description = "foo" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.tf_test_foo.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_ess_scaling_group" "foo" { + min_size = 1 + max_size = 1 + scaling_group_name = "foo" + removal_policies = ["OldestInstance", "NewestInstance"] +} + +resource "alicloud_ess_scaling_configuration" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" + enable = true + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + 
instance_type = "ecs.s2.large" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} +` + +const testAccEssScalingConfiguration_disable = ` +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "tf_test_foo" { + description = "foo" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.tf_test_foo.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_ess_scaling_group" "foo" { + min_size = 1 + max_size = 1 + scaling_group_name = "foo" + removal_policies = ["OldestInstance", "NewestInstance"] +} + +resource "alicloud_ess_scaling_configuration" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" + enable = false + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "ecs.s2.large" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} +` diff --git a/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup.go b/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup.go new file mode 100644 index 000000000..89f4154db --- /dev/null +++ b/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup.go @@ -0,0 +1,209 @@ +package alicloud + +import ( + "fmt" + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/ess" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "strings" + "time" +) + +func resourceAlicloudEssScalingGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAliyunEssScalingGroupCreate, + Read: resourceAliyunEssScalingGroupRead, + Update: resourceAliyunEssScalingGroupUpdate, + Delete: resourceAliyunEssScalingGroupDelete, + + Schema: map[string]*schema.Schema{ + "min_size": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateIntegerInRange(0, 100), + }, + "max_size": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateIntegerInRange(0, 100), + }, + "scaling_group_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "default_cooldown": &schema.Schema{ + Type: schema.TypeInt, + Default: 300, + Optional: true, + ValidateFunc: validateIntegerInRange(0, 86400), + }, + "vswitch_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "removal_policies": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + MaxItems: 2, + }, + "db_instance_ids": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + MaxItems: 3, + }, + "loadbalancer_ids": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, + }, + } +} + +func resourceAliyunEssScalingGroupCreate(d *schema.ResourceData, meta interface{}) error { + + args, err := buildAlicloudEssScalingGroupArgs(d, meta) + if err != nil { + return err + } + + essconn := meta.(*AliyunClient).essconn + + scaling, err := essconn.CreateScalingGroup(args) + if err != nil { + return err + } + + d.SetId(scaling.ScalingGroupId) + + return resourceAliyunEssScalingGroupUpdate(d, meta) +} + +func resourceAliyunEssScalingGroupRead(d *schema.ResourceData, meta interface{}) error { + + client := meta.(*AliyunClient) + + 
scaling, err := client.DescribeScalingGroupById(d.Id()) + if err != nil { + if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound { + d.SetId("") + return nil + } + return fmt.Errorf("Error Describe ESS scaling group Attribute: %#v", err) + } + + d.Set("min_size", scaling.MinSize) + d.Set("max_size", scaling.MaxSize) + d.Set("scaling_group_name", scaling.ScalingGroupName) + d.Set("default_cooldown", scaling.DefaultCooldown) + d.Set("removal_policies", scaling.RemovalPolicies) + d.Set("db_instance_ids", scaling.DBInstanceIds) + d.Set("loadbalancer_ids", scaling.LoadBalancerId) + + return nil +} + +func resourceAliyunEssScalingGroupUpdate(d *schema.ResourceData, meta interface{}) error { + + conn := meta.(*AliyunClient).essconn + args := &ess.ModifyScalingGroupArgs{ + ScalingGroupId: d.Id(), + } + + if d.HasChange("scaling_group_name") { + args.ScalingGroupName = d.Get("scaling_group_name").(string) + } + + if d.HasChange("min_size") { + args.MinSize = d.Get("min_size").(int) + } + + if d.HasChange("max_size") { + args.MaxSize = d.Get("max_size").(int) + } + + if d.HasChange("default_cooldown") { + args.DefaultCooldown = d.Get("default_cooldown").(int) + } + + if d.HasChange("removal_policies") { + policyStrings := d.Get("removal_policies").([]interface{}) + args.RemovalPolicy = expandStringList(policyStrings) + } + + if _, err := conn.ModifyScalingGroup(args); err != nil { + return err + } + + return resourceAliyunEssScalingGroupRead(d, meta) +} + +func resourceAliyunEssScalingGroupDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AliyunClient) + + return resource.Retry(2*time.Minute, func() *resource.RetryError { + err := client.DeleteScalingGroupById(d.Id()) + + if err != nil { + e, _ := err.(*common.Error) + if e.ErrorResponse.Code != InvalidScalingGroupIdNotFound { + return resource.RetryableError(fmt.Errorf("Scaling group in use - trying again while it is deleted.")) + } + } + + _, err = client.DescribeScalingGroupById(d.Id()) + if err != nil { + if notFoundError(err) { + return nil + } + return resource.NonRetryableError(err) + } + + return resource.RetryableError(fmt.Errorf("Scaling group in use - trying again while it is deleted.")) + }) +} + +func buildAlicloudEssScalingGroupArgs(d *schema.ResourceData, meta interface{}) (*ess.CreateScalingGroupArgs, error) { + client := meta.(*AliyunClient) + args := &ess.CreateScalingGroupArgs{ + RegionId: getRegion(d, meta), + MinSize: d.Get("min_size").(int), + MaxSize: d.Get("max_size").(int), + DefaultCooldown: d.Get("default_cooldown").(int), + } + + if v := d.Get("scaling_group_name").(string); v != "" { + args.ScalingGroupName = v + } + + if v := d.Get("vswitch_id").(string); v != "" { + args.VSwitchId = v + + // get vpcId + vpcId, err := client.GetVpcIdByVSwitchId(v) + + if err != nil { + return nil, fmt.Errorf("VswitchId %s is not valid of current region", v) + } + // fill vpcId by vswitchId + args.VpcId = vpcId + + } + + dbs, ok := d.GetOk("db_instance_ids") + if ok { + dbsStrings := dbs.([]interface{}) + args.DBInstanceId = expandStringList(dbsStrings) + } + + lbs, ok := d.GetOk("loadbalancer_ids") + if ok { + lbsStrings := lbs.([]interface{}) + args.LoadBalancerId = strings.Join(expandStringList(lbsStrings), COMMA_SEPARATED) + } + + return args, nil +} diff --git a/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup_test.go b/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup_test.go new file mode 100644 index 000000000..e707035b1 --- /dev/null +++ 
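Every Delete in this patch (the scaling group above; the scaling configuration, rule, and schedule elsewhere) follows the same delete-then-poll shape: issue the delete, then describe the resource inside resource.Retry until notFoundError reports it gone. Below is a generic sketch of that pattern, assuming deleteFn and describeFn wrap client calls such as DeleteScalingGroupById and DescribeScalingGroupById; the named helper is hypothetical, since the patch inlines this loop in each resource.

```go
package alicloud

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

// deleteWithRetry is a hypothetical helper; notFoundError is the
// predicate from common.go in this patch.
func deleteWithRetry(deleteFn func() error, describeFn func() error) error {
	return resource.Retry(2*time.Minute, func() *resource.RetryError {
		if err := deleteFn(); err != nil {
			// The resource may still be referenced; retry the delete.
			return resource.RetryableError(fmt.Errorf("resource in use - retrying delete: %#v", err))
		}
		if err := describeFn(); err != nil {
			if notFoundError(err) {
				return nil // the resource is gone; deletion succeeded
			}
			return resource.NonRetryableError(err)
		}
		// The delete was accepted but the resource is still visible; poll again.
		return resource.RetryableError(fmt.Errorf("resource still exists - retrying"))
	})
}
```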
b/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup_test.go @@ -0,0 +1,297 @@ +package alicloud + +import ( + "fmt" + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/ess" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "log" + "testing" +) + +func TestAccAlicloudEssScalingGroup_basic(t *testing.T) { + var sg ess.ScalingGroupItemType + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + + // module name + IDRefreshName: "alicloud_ess_scaling_group.foo", + + Providers: testAccProviders, + CheckDestroy: testAccCheckEssScalingGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccEssScalingGroupConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingGroupExists( + "alicloud_ess_scaling_group.foo", &sg), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "min_size", + "1"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "max_size", + "1"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "scaling_group_name", + "foo"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "removal_policies.#", + "2", + ), + ), + }, + }, + }) + +} + +func TestAccAlicloudEssScalingGroup_update(t *testing.T) { + var sg ess.ScalingGroupItemType + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + + // module name + IDRefreshName: "alicloud_ess_scaling_group.foo", + + Providers: testAccProviders, + CheckDestroy: testAccCheckEssScalingGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccEssScalingGroup, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingGroupExists( + "alicloud_ess_scaling_group.foo", &sg), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "min_size", + "1"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "max_size", + "1"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "scaling_group_name", + "foo"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "removal_policies.#", + "2", + ), + ), + }, + + resource.TestStep{ + Config: testAccEssScalingGroup_update, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingGroupExists( + "alicloud_ess_scaling_group.foo", &sg), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "min_size", + "2"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "max_size", + "2"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "scaling_group_name", + "update"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "removal_policies.#", + "1", + ), + ), + }, + }, + }) + +} + +func SkipTestAccAlicloudEssScalingGroup_vpc(t *testing.T) { + var sg ess.ScalingGroupItemType + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + + // module name + IDRefreshName: "alicloud_ess_scaling_group.foo", + + Providers: testAccProviders, + CheckDestroy: testAccCheckEssScalingGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccEssScalingGroup_vpc, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingGroupExists( + "alicloud_ess_scaling_group.foo", &sg), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "min_size", + "1"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", 
+ "max_size", + "1"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "scaling_group_name", + "foo"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_group.foo", + "removal_policies.#", + "2", + ), + ), + }, + }, + }) + +} + +func testAccCheckEssScalingGroupExists(n string, d *ess.ScalingGroupItemType) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ESS Scaling Group ID is set") + } + + client := testAccProvider.Meta().(*AliyunClient) + attr, err := client.DescribeScalingGroupById(rs.Primary.ID) + log.Printf("[DEBUG] check scaling group %s attribute %#v", rs.Primary.ID, attr) + + if err != nil { + return err + } + + if attr == nil { + return fmt.Errorf("Scaling Group not found") + } + + *d = *attr + return nil + } +} + +func testAccCheckEssScalingGroupDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*AliyunClient) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "alicloud_ess_scaling_group" { + continue + } + + ins, err := client.DescribeScalingGroupById(rs.Primary.ID) + + if ins != nil { + return fmt.Errorf("Error ESS scaling group still exist") + } + + // Verify the error is what we want + if err != nil { + // Verify the error is what we want + e, _ := err.(*common.Error) + if e.ErrorResponse.Code == InstanceNotfound { + continue + } + return err + } + } + + return nil +} + +const testAccEssScalingGroupConfig = ` +resource "alicloud_ess_scaling_group" "foo" { + min_size = 1 + max_size = 1 + scaling_group_name = "foo" + removal_policies = ["OldestInstance", "NewestInstance"] +} +` + +const testAccEssScalingGroup = ` +resource "alicloud_ess_scaling_group" "foo" { + min_size = 1 + max_size = 1 + scaling_group_name = "foo" + removal_policies = ["OldestInstance", "NewestInstance"] +} +` + +const testAccEssScalingGroup_update = ` +resource "alicloud_ess_scaling_group" "foo" { + min_size = 2 + max_size = 2 + scaling_group_name = "update" + removal_policies = ["OldestInstance"] +} +` +const testAccEssScalingGroup_vpc = ` +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +data "alicloud_zones" "default" { + "available_disk_category"= "cloud_efficiency" + "available_resource_creation"= "VSwitch" +} + +resource "alicloud_vpc" "foo" { + name = "tf_test_foo" + cidr_block = "172.16.0.0/12" +} + +resource "alicloud_vswitch" "foo" { + vpc_id = "${alicloud_vpc.foo.id}" + cidr_block = "172.16.0.0/21" + availability_zone = "${data.alicloud_zones.default.zones.0.id}" +} + +resource "alicloud_security_group" "tf_test_foo" { + description = "foo" + vpc_id = "${alicloud_vpc.foo.id}" +} + +resource "alicloud_ess_scaling_group" "foo" { + min_size = 1 + max_size = 1 + scaling_group_name = "foo" + default_cooldown = 20 + vswitch_id = "${alicloud_vswitch.foo.id}" + removal_policies = ["OldestInstance", "NewestInstance"] +} + +resource "alicloud_ess_scaling_configuration" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" + enable = true + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "ecs.n1.medium" + io_optimized = "optimized" + system_disk_category = "cloud_efficiency" + internet_charge_type = "PayByTraffic" + internet_max_bandwidth_out = 10 + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} +` diff --git 
a/builtin/providers/alicloud/resource_alicloud_ess_scalingrule.go b/builtin/providers/alicloud/resource_alicloud_ess_scalingrule.go new file mode 100644 index 000000000..bfa1f904f --- /dev/null +++ b/builtin/providers/alicloud/resource_alicloud_ess_scalingrule.go @@ -0,0 +1,168 @@ +package alicloud + +import ( + "fmt" + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/ess" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "strings" + "time" +) + +func resourceAlicloudEssScalingRule() *schema.Resource { + return &schema.Resource{ + Create: resourceAliyunEssScalingRuleCreate, + Read: resourceAliyunEssScalingRuleRead, + Update: resourceAliyunEssScalingRuleUpdate, + Delete: resourceAliyunEssScalingRuleDelete, + + Schema: map[string]*schema.Schema{ + "scaling_group_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "adjustment_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{string(ess.QuantityChangeInCapacity), + string(ess.PercentChangeInCapacity), string(ess.TotalCapacity)}), + }, + "adjustment_value": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + "scaling_rule_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + "ari": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "cooldown": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateIntegerInRange(0, 86400), + }, + }, + } +} + +func resourceAliyunEssScalingRuleCreate(d *schema.ResourceData, meta interface{}) error { + + args, err := buildAlicloudEssScalingRuleArgs(d, meta) + if err != nil { + return err + } + + essconn := meta.(*AliyunClient).essconn + + rule, err := essconn.CreateScalingRule(args) + if err != nil { + return err + } + + d.SetId(d.Get("scaling_group_id").(string) + COLON_SEPARATED + rule.ScalingRuleId) + + return resourceAliyunEssScalingRuleUpdate(d, meta) +} + +func resourceAliyunEssScalingRuleRead(d *schema.ResourceData, meta interface{}) error { + + client := meta.(*AliyunClient) + ids := strings.Split(d.Id(), COLON_SEPARATED) + + rule, err := client.DescribeScalingRuleById(ids[0], ids[1]) + if err != nil { + if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound { + d.SetId("") + return nil + } + return fmt.Errorf("Error Describe ESS scaling rule Attribute: %#v", err) + } + + d.Set("scaling_group_id", rule.ScalingGroupId) + d.Set("ari", rule.ScalingRuleAri) + d.Set("adjustment_type", rule.AdjustmentType) + d.Set("adjustment_value", rule.AdjustmentValue) + d.Set("scaling_rule_name", rule.ScalingRuleName) + d.Set("cooldown", rule.Cooldown) + + return nil +} + +func resourceAliyunEssScalingRuleDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AliyunClient) + ids := strings.Split(d.Id(), COLON_SEPARATED) + + return resource.Retry(2*time.Minute, func() *resource.RetryError { + err := client.DeleteScalingRuleById(ids[1]) + + if err != nil { + return resource.RetryableError(fmt.Errorf("Scaling rule in use - trying again while it is deleted.")) + } + + _, err = client.DescribeScalingRuleById(ids[0], ids[1]) + if err != nil { + if notFoundError(err) { + return nil + } + return resource.NonRetryableError(err) + } + + return resource.RetryableError(fmt.Errorf("Scaling rule in use - trying again while it is deleted.")) + }) +} + +func resourceAliyunEssScalingRuleUpdate(d *schema.ResourceData, meta interface{}) error { + + conn 
:= meta.(*AliyunClient).essconn + ids := strings.Split(d.Id(), COLON_SEPARATED) + + args := &ess.ModifyScalingRuleArgs{ + ScalingRuleId: ids[1], + } + + if d.HasChange("adjustment_type") { + args.AdjustmentType = ess.AdjustmentType(d.Get("adjustment_type").(string)) + } + + if d.HasChange("adjustment_value") { + args.AdjustmentValue = d.Get("adjustment_value").(int) + } + + if d.HasChange("scaling_rule_name") { + args.ScalingRuleName = d.Get("scaling_rule_name").(string) + } + + if d.HasChange("cooldown") { + args.Cooldown = d.Get("cooldown").(int) + } + + if _, err := conn.ModifyScalingRule(args); err != nil { + return err + } + + return resourceAliyunEssScalingRuleRead(d, meta) +} + +func buildAlicloudEssScalingRuleArgs(d *schema.ResourceData, meta interface{}) (*ess.CreateScalingRuleArgs, error) { + args := &ess.CreateScalingRuleArgs{ + RegionId: getRegion(d, meta), + ScalingGroupId: d.Get("scaling_group_id").(string), + AdjustmentType: ess.AdjustmentType(d.Get("adjustment_type").(string)), + AdjustmentValue: d.Get("adjustment_value").(int), + } + + if v := d.Get("scaling_rule_name").(string); v != "" { + args.ScalingRuleName = v + } + + if v := d.Get("cooldown").(int); v != 0 { + args.Cooldown = v + } + + return args, nil +} diff --git a/builtin/providers/alicloud/resource_alicloud_ess_scalingrule_test.go b/builtin/providers/alicloud/resource_alicloud_ess_scalingrule_test.go new file mode 100644 index 000000000..81020a747 --- /dev/null +++ b/builtin/providers/alicloud/resource_alicloud_ess_scalingrule_test.go @@ -0,0 +1,290 @@ +package alicloud + +import ( + "fmt" + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/ess" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "log" + "strings" + "testing" +) + +func TestAccAlicloudEssScalingRule_basic(t *testing.T) { + var sc ess.ScalingRuleItemType + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + + // module name + IDRefreshName: "alicloud_ess_scaling_rule.foo", + + Providers: testAccProviders, + CheckDestroy: testAccCheckEssScalingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccEssScalingRuleConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingRuleExists( + "alicloud_ess_scaling_rule.foo", &sc), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_rule.foo", + "adjustment_type", + "TotalCapacity"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_rule.foo", + "adjustment_value", + "1"), + ), + }, + }, + }) +} + +func TestAccAlicloudEssScalingRule_update(t *testing.T) { + var sc ess.ScalingRuleItemType + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + + // module name + IDRefreshName: "alicloud_ess_scaling_rule.foo", + + Providers: testAccProviders, + CheckDestroy: testAccCheckEssScalingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccEssScalingRule, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingRuleExists( + "alicloud_ess_scaling_rule.foo", &sc), + testAccCheckEssScalingRuleExists( + "alicloud_ess_scaling_rule.foo", &sc), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_rule.foo", + "adjustment_type", + "TotalCapacity"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_rule.foo", + "adjustment_value", + "1"), + ), + }, + + resource.TestStep{ + Config: testAccEssScalingRule_update, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScalingRuleExists( + 
"alicloud_ess_scaling_rule.foo", &sc), + testAccCheckEssScalingRuleExists( + "alicloud_ess_scaling_rule.foo", &sc), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_rule.foo", + "adjustment_type", + "TotalCapacity"), + resource.TestCheckResourceAttr( + "alicloud_ess_scaling_rule.foo", + "adjustment_value", + "2"), + ), + }, + }, + }) +} + +func testAccCheckEssScalingRuleExists(n string, d *ess.ScalingRuleItemType) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ESS Scaling Rule ID is set") + } + + client := testAccProvider.Meta().(*AliyunClient) + ids := strings.Split(rs.Primary.ID, COLON_SEPARATED) + attr, err := client.DescribeScalingRuleById(ids[0], ids[1]) + log.Printf("[DEBUG] check scaling rule %s attribute %#v", rs.Primary.ID, attr) + + if err != nil { + return err + } + + if attr == nil { + return fmt.Errorf("Scaling rule not found") + } + + *d = *attr + return nil + } +} + +func testAccCheckEssScalingRuleDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*AliyunClient) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "alicloud_ess_scaling_rule" { + continue + } + ids := strings.Split(rs.Primary.ID, COLON_SEPARATED) + ins, err := client.DescribeScalingRuleById(ids[0], ids[1]) + + if ins != nil { + return fmt.Errorf("Error ESS scaling rule still exist") + } + + // Verify the error is what we want + if err != nil { + // Verify the error is what we want + e, _ := err.(*common.Error) + if e.ErrorResponse.Code == InstanceNotfound { + continue + } + return err + } + } + + return nil +} + +const testAccEssScalingRuleConfig = ` +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "tf_test_foo" { + description = "foo" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.tf_test_foo.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_ess_scaling_group" "bar" { + min_size = 1 + max_size = 1 + scaling_group_name = "bar" + removal_policies = ["OldestInstance", "NewestInstance"] +} + +resource "alicloud_ess_scaling_configuration" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "ecs.s2.large" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} + +resource "alicloud_ess_scaling_rule" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" + adjustment_type = "TotalCapacity" + adjustment_value = 1 + cooldown = 120 +} +` + +const testAccEssScalingRule = ` +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "tf_test_foo" { + description = "foo" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.tf_test_foo.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_ess_scaling_group" "bar" { + min_size = 1 + max_size = 1 + scaling_group_name = "bar" + removal_policies = ["OldestInstance", "NewestInstance"] +} + 
+resource "alicloud_ess_scaling_configuration" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "ecs.s2.large" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} + +resource "alicloud_ess_scaling_rule" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" + adjustment_type = "TotalCapacity" + adjustment_value = 1 + cooldown = 120 +} +` + +const testAccEssScalingRule_update = ` +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "tf_test_foo" { + description = "foo" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.tf_test_foo.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_ess_scaling_group" "bar" { + min_size = 1 + max_size = 1 + scaling_group_name = "bar" + removal_policies = ["OldestInstance", "NewestInstance"] +} + +resource "alicloud_ess_scaling_configuration" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "ecs.s2.large" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} + +resource "alicloud_ess_scaling_rule" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" + adjustment_type = "TotalCapacity" + adjustment_value = 2 + cooldown = 60 +} +` diff --git a/builtin/providers/alicloud/resource_alicloud_ess_schedule.go b/builtin/providers/alicloud/resource_alicloud_ess_schedule.go new file mode 100644 index 000000000..4e5660a50 --- /dev/null +++ b/builtin/providers/alicloud/resource_alicloud_ess_schedule.go @@ -0,0 +1,220 @@ +package alicloud + +import ( + "fmt" + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/ess" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "time" +) + +func resourceAlicloudEssSchedule() *schema.Resource { + return &schema.Resource{ + Create: resourceAliyunEssScheduleCreate, + Read: resourceAliyunEssScheduleRead, + Update: resourceAliyunEssScheduleUpdate, + Delete: resourceAliyunEssScheduleDelete, + + Schema: map[string]*schema.Schema{ + "scheduled_action": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "launch_time": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "scheduled_task_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + "launch_expiration_time": &schema.Schema{ + Type: schema.TypeInt, + Default: 600, + Optional: true, + ValidateFunc: validateIntegerInRange(0, 21600), + }, + "recurrence_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validateAllowedStringValue([]string{string(ess.Daily), + string(ess.Weekly), string(ess.Monthly)}), + }, + "recurrence_value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + "recurrence_end_time": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + "task_enabled": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + }, + }, + } +} + +func 
resourceAliyunEssScheduleCreate(d *schema.ResourceData, meta interface{}) error { + + args, err := buildAlicloudEssScheduleArgs(d, meta) + if err != nil { + return err + } + + essconn := meta.(*AliyunClient).essconn + + rule, err := essconn.CreateScheduledTask(args) + if err != nil { + return err + } + + d.SetId(rule.ScheduledTaskId) + + return resourceAliyunEssScheduleUpdate(d, meta) +} + +func resourceAliyunEssScheduleRead(d *schema.ResourceData, meta interface{}) error { + + client := meta.(*AliyunClient) + + rule, err := client.DescribeScheduleById(d.Id()) + if err != nil { + if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound { + d.SetId("") + return nil + } + return fmt.Errorf("Error Describe ESS schedule Attribute: %#v", err) + } + + d.Set("scheduled_action", rule.ScheduledAction) + d.Set("launch_time", rule.LaunchTime) + d.Set("scheduled_task_name", rule.ScheduledTaskName) + d.Set("description", rule.Description) + d.Set("launch_expiration_time", rule.LaunchExpirationTime) + d.Set("recurrence_type", rule.RecurrenceType) + d.Set("recurrence_value", rule.RecurrenceValue) + d.Set("recurrence_end_time", rule.RecurrenceEndTime) + d.Set("task_enabled", rule.TaskEnabled) + + return nil +} + +func resourceAliyunEssScheduleUpdate(d *schema.ResourceData, meta interface{}) error { + + conn := meta.(*AliyunClient).essconn + + args := &ess.ModifyScheduledTaskArgs{ + ScheduledTaskId: d.Id(), + } + + if d.HasChange("scheduled_task_name") { + args.ScheduledTaskName = d.Get("scheduled_task_name").(string) + } + + if d.HasChange("description") { + args.Description = d.Get("description").(string) + } + + if d.HasChange("scheduled_action") { + args.ScheduledAction = d.Get("scheduled_action").(string) + } + + if d.HasChange("launch_time") { + args.LaunchTime = d.Get("launch_time").(string) + } + + if d.HasChange("launch_expiration_time") { + args.LaunchExpirationTime = d.Get("launch_expiration_time").(int) + } + + if d.HasChange("recurrence_type") { + args.RecurrenceType = ess.RecurrenceType(d.Get("recurrence_type").(string)) + } + + if d.HasChange("recurrence_value") { + args.RecurrenceValue = d.Get("recurrence_value").(string) + } + + if d.HasChange("recurrence_end_time") { + args.RecurrenceEndTime = d.Get("recurrence_end_time").(string) + } + + if d.HasChange("task_enabled") { + args.TaskEnabled = d.Get("task_enabled").(bool) + } + + if _, err := conn.ModifyScheduledTask(args); err != nil { + return err + } + + return resourceAliyunEssScheduleRead(d, meta) +} + +func resourceAliyunEssScheduleDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AliyunClient) + + return resource.Retry(2*time.Minute, func() *resource.RetryError { + err := client.DeleteScheduleById(d.Id()) + + if err != nil { + return resource.RetryableError(fmt.Errorf("Scaling schedule in use - trying again while it is deleted.")) + } + + _, err = client.DescribeScheduleById(d.Id()) + if err != nil { + if notFoundError(err) { + return nil + } + return resource.NonRetryableError(err) + } + + return resource.RetryableError(fmt.Errorf("Scaling schedule in use - trying again while it is deleted.")) + }) +} + +func buildAlicloudEssScheduleArgs(d *schema.ResourceData, meta interface{}) (*ess.CreateScheduledTaskArgs, error) { + args := &ess.CreateScheduledTaskArgs{ + RegionId: getRegion(d, meta), + ScheduledAction: d.Get("scheduled_action").(string), + LaunchTime: d.Get("launch_time").(string), + TaskEnabled: d.Get("task_enabled").(bool), + } + + if v := d.Get("scheduled_task_name").(string); v != "" { + 
args.ScheduledTaskName = v + } + + if v := d.Get("description").(string); v != "" { + args.Description = v + } + + if v := d.Get("recurrence_type").(string); v != "" { + args.RecurrenceType = ess.RecurrenceType(v) + } + + if v := d.Get("recurrence_value").(string); v != "" { + args.RecurrenceValue = v + } + + if v := d.Get("recurrence_end_time").(string); v != "" { + args.RecurrenceEndTime = v + } + + if v := d.Get("launch_expiration_time").(int); v != 0 { + args.LaunchExpirationTime = v + } + + return args, nil +} diff --git a/builtin/providers/alicloud/resource_alicloud_ess_schedule_test.go b/builtin/providers/alicloud/resource_alicloud_ess_schedule_test.go new file mode 100644 index 000000000..cb8044cc4 --- /dev/null +++ b/builtin/providers/alicloud/resource_alicloud_ess_schedule_test.go @@ -0,0 +1,151 @@ +package alicloud + +import ( + "fmt" + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/ess" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "log" + "testing" +) + +func TestAccAlicloudEssSchedule_basic(t *testing.T) { + var sc ess.ScheduledTaskItemType + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + + // module name + IDRefreshName: "alicloud_ess_schedule.foo", + + Providers: testAccProviders, + CheckDestroy: testAccCheckEssScheduleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccEssScheduleConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckEssScheduleExists( + "alicloud_ess_schedule.foo", &sc), + resource.TestCheckResourceAttr( + "alicloud_ess_schedule.foo", + "launch_time", + "2017-04-29T07:30Z"), + resource.TestCheckResourceAttr( + "alicloud_ess_schedule.foo", + "task_enabled", + "true"), + ), + }, + }, + }) +} + +func testAccCheckEssScheduleExists(n string, d *ess.ScheduledTaskItemType) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ESS Schedule ID is set") + } + + client := testAccProvider.Meta().(*AliyunClient) + attr, err := client.DescribeScheduleById(rs.Primary.ID) + log.Printf("[DEBUG] check schedule %s attribute %#v", rs.Primary.ID, attr) + + if err != nil { + return err + } + + if attr == nil { + return fmt.Errorf("Ess schedule not found") + } + + *d = *attr + return nil + } +} + +func testAccCheckEssScheduleDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*AliyunClient) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "alicloud_ess_schedule" { + continue + } + ins, err := client.DescribeScheduleById(rs.Primary.ID) + + if ins != nil { + return fmt.Errorf("Error ESS schedule still exist") + } + + // Verify the error is what we want + if err != nil { + // Verify the error is what we want + e, _ := err.(*common.Error) + if e.ErrorResponse.Code == InstanceNotfound { + continue + } + return err + } + } + + return nil +} + +const testAccEssScheduleConfig = ` +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "tf_test_foo" { + name = "tf_test_foo" + description = "foo" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.tf_test_foo.id}" + cidr_ip = "0.0.0.0/0" 
+} + +resource "alicloud_ess_scaling_group" "bar" { + min_size = 1 + max_size = 1 + scaling_group_name = "bar" + removal_policies = ["OldestInstance", "NewestInstance"] +} + +resource "alicloud_ess_scaling_configuration" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "ecs.s2.large" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.tf_test_foo.id}" +} + +resource "alicloud_ess_scaling_rule" "foo" { + scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" + adjustment_type = "TotalCapacity" + adjustment_value = 2 + cooldown = 60 +} + +resource "alicloud_ess_schedule" "foo" { + scheduled_action = "${alicloud_ess_scaling_rule.foo.ari}" + launch_time = "2017-04-29T07:30Z" + scheduled_task_name = "tf-foo" +} +` diff --git a/builtin/providers/alicloud/resource_alicloud_forward.go b/builtin/providers/alicloud/resource_alicloud_forward.go new file mode 100644 index 000000000..8f75c54d0 --- /dev/null +++ b/builtin/providers/alicloud/resource_alicloud_forward.go @@ -0,0 +1,165 @@ +package alicloud + +import ( + "fmt" + "github.com/denverdino/aliyungo/ecs" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAliyunForwardEntry() *schema.Resource { + return &schema.Resource{ + Create: resourceAliyunForwardEntryCreate, + Read: resourceAliyunForwardEntryRead, + Update: resourceAliyunForwardEntryUpdate, + Delete: resourceAliyunForwardEntryDelete, + + Schema: map[string]*schema.Schema{ + "forward_table_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "external_ip": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "external_port": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateForwardPort, + }, + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"tcp", "udp", "any"}), + }, + "internal_ip": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "internal_port": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateForwardPort, + }, + }, + } +} + +func resourceAliyunForwardEntryCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AliyunClient).vpcconn + + args := &ecs.CreateForwardEntryArgs{ + RegionId: getRegion(d, meta), + ForwardTableId: d.Get("forward_table_id").(string), + ExternalIp: d.Get("external_ip").(string), + ExternalPort: d.Get("external_port").(string), + IpProtocol: d.Get("ip_protocol").(string), + InternalIp: d.Get("internal_ip").(string), + InternalPort: d.Get("internal_port").(string), + } + + resp, err := conn.CreateForwardEntry(args) + if err != nil { + return fmt.Errorf("CreateForwardEntry got error: %#v", err) + } + + d.SetId(resp.ForwardEntryId) + d.Set("forward_table_id", d.Get("forward_table_id").(string)) + + return resourceAliyunForwardEntryRead(d, meta) +} + +func resourceAliyunForwardEntryRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AliyunClient) + + forwardEntry, err := client.DescribeForwardEntry(d.Get("forward_table_id").(string), d.Id()) + + if err != nil { + if notFoundError(err) { + return nil + } + return err + } + + d.Set("forward_table_id", forwardEntry.ForwardTableId) + d.Set("external_ip", forwardEntry.ExternalIp) + d.Set("external_port", forwardEntry.ExternalPort) + d.Set("ip_protocol", forwardEntry.IpProtocol) + d.Set("internal_ip", 
forwardEntry.InternalIp)
+	d.Set("internal_port", forwardEntry.InternalPort)
+
+	return nil
+}
+
+func resourceAliyunForwardEntryUpdate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*AliyunClient)
+	conn := client.vpcconn
+
+	forwardEntry, err := client.DescribeForwardEntry(d.Get("forward_table_id").(string), d.Id())
+	if err != nil {
+		return err
+	}
+
+	d.Partial(true)
+	attributeUpdate := false
+	args := &ecs.ModifyForwardEntryArgs{
+		RegionId:       getRegion(d, meta),
+		ForwardTableId: forwardEntry.ForwardTableId,
+		ForwardEntryId: forwardEntry.ForwardEntryId,
+		ExternalIp:     forwardEntry.ExternalIp,
+		IpProtocol:     forwardEntry.IpProtocol,
+		ExternalPort:   forwardEntry.ExternalPort,
+		InternalIp:     forwardEntry.InternalIp,
+		InternalPort:   forwardEntry.InternalPort,
+	}
+
+	if d.HasChange("external_port") {
+		d.SetPartial("external_port")
+		args.ExternalPort = d.Get("external_port").(string)
+		attributeUpdate = true
+	}
+
+	if d.HasChange("ip_protocol") {
+		d.SetPartial("ip_protocol")
+		args.IpProtocol = d.Get("ip_protocol").(string)
+		attributeUpdate = true
+	}
+
+	if d.HasChange("internal_port") {
+		d.SetPartial("internal_port")
+		args.InternalPort = d.Get("internal_port").(string)
+		attributeUpdate = true
+	}
+
+	if attributeUpdate {
+		if err := conn.ModifyForwardEntry(args); err != nil {
+			return err
+		}
+	}
+
+	d.Partial(false)
+
+	return resourceAliyunForwardEntryRead(d, meta)
+}
+
+func resourceAliyunForwardEntryDelete(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*AliyunClient)
+	conn := client.vpcconn
+
+	forwardEntryId := d.Id()
+	forwardTableId := d.Get("forward_table_id").(string)
+
+	args := &ecs.DeleteForwardEntryArgs{
+		RegionId:       getRegion(d, meta),
+		ForwardTableId: forwardTableId,
+		ForwardEntryId: forwardEntryId,
+	}
+
+	if err := conn.DeleteForwardEntry(args); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/builtin/providers/alicloud/resource_alicloud_forward_test.go b/builtin/providers/alicloud/resource_alicloud_forward_test.go
new file mode 100644
index 000000000..60a67f322
--- /dev/null
+++ b/builtin/providers/alicloud/resource_alicloud_forward_test.go
@@ -0,0 +1,216 @@
+package alicloud
+
+import (
+	"fmt"
+	"github.com/denverdino/aliyungo/common"
+	"github.com/denverdino/aliyungo/ecs"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+	"testing"
+)
+
+func TestAccAlicloudForward_basic(t *testing.T) {
+	var forward ecs.ForwardTableEntrySetType
+
+	resource.Test(t, resource.TestCase{
+		PreCheck: func() {
+			testAccPreCheck(t)
+		},
+
+		// module name
+		IDRefreshName: "alicloud_forward_entry.foo",
+		Providers:     testAccProviders,
+		CheckDestroy:  testAccCheckForwardEntryDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccForwardEntryConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckForwardEntryExists(
+						"alicloud_forward_entry.foo", &forward),
+				),
+			},
+
+			resource.TestStep{
+				Config: testAccForwardEntryUpdate,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckForwardEntryExists(
+						"alicloud_forward_entry.foo", &forward),
+				),
+			},
+		},
+	})
+
+}
+
+func testAccCheckForwardEntryDestroy(s *terraform.State) error {
+	client := testAccProvider.Meta().(*AliyunClient)
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "alicloud_forward_entry" {
+			continue
+		}
+
+		// Try to find the forward entry
+		instance, err := client.DescribeForwardEntry(rs.Primary.Attributes["forward_table_id"], rs.Primary.ID)
+
+		// Work around a quirk in the
+		// underlying API: describing an empty forward table throws a "can't find
+		// the forward table" error instead of returning an empty list, so a
+		// not-found error here means the entry was destroyed.
+		if err != nil {
+			if notFoundError(err) {
+				continue
+			}
+			return err
+		}
+
+		if instance.ForwardEntryId != "" {
+			return fmt.Errorf("Forward entry still exists")
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckForwardEntryExists(n string, forward *ecs.ForwardTableEntrySetType) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No ForwardEntry ID is set")
+		}
+
+		client := testAccProvider.Meta().(*AliyunClient)
+		instance, err := client.DescribeForwardEntry(rs.Primary.Attributes["forward_table_id"], rs.Primary.ID)
+
+		if err != nil {
+			return err
+		}
+		if instance.ForwardEntryId == "" {
+			return fmt.Errorf("ForwardEntry not found")
+		}
+
+		*forward = instance
+		return nil
+	}
+}
+
+const testAccForwardEntryConfig = `
+provider "alicloud"{
+	region = "cn-hangzhou"
+}
+
+data "alicloud_zones" "default" {
+	"available_resource_creation"= "VSwitch"
+}
+
+resource "alicloud_vpc" "foo" {
+	name = "tf_test_foo"
+	cidr_block = "172.16.0.0/12"
+}
+
+resource "alicloud_vswitch" "foo" {
+	vpc_id = "${alicloud_vpc.foo.id}"
+	cidr_block = "172.16.0.0/21"
+	availability_zone = "${data.alicloud_zones.default.zones.0.id}"
+}
+
+resource "alicloud_nat_gateway" "foo" {
+	vpc_id = "${alicloud_vpc.foo.id}"
+	spec = "Small"
+	name = "test_foo"
+	bandwidth_packages = [{
+		ip_count = 1
+		bandwidth = 5
+		zone = "${data.alicloud_zones.default.zones.0.id}"
+	},{
+		ip_count = 1
+		bandwidth = 6
+		zone = "${data.alicloud_zones.default.zones.0.id}"
+	}]
+	depends_on = [
+		"alicloud_vswitch.foo"]
+}
+
+resource "alicloud_forward_entry" "foo"{
+	forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}"
+	external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
+	external_port = "80"
+	ip_protocol = "tcp"
+	internal_ip = "172.16.0.3"
+	internal_port = "8080"
+}
+
+resource "alicloud_forward_entry" "foo1"{
+	forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}"
+	external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
+	external_port = "443"
+	ip_protocol = "udp"
+	internal_ip = "172.16.0.4"
+	internal_port = "8080"
+}
+`
+
+const testAccForwardEntryUpdate = `
+provider "alicloud"{
+	region = "cn-hangzhou"
+}
+
+data "alicloud_zones" "default" {
+	"available_resource_creation"= "VSwitch"
+}
+
+resource "alicloud_vpc" "foo" {
+	name = "tf_test_foo"
+	cidr_block = "172.16.0.0/12"
+}
+
+resource "alicloud_vswitch" "foo" {
+	vpc_id = "${alicloud_vpc.foo.id}"
+	cidr_block = "172.16.0.0/21"
+	availability_zone = "${data.alicloud_zones.default.zones.0.id}"
+}
+
+resource "alicloud_nat_gateway" "foo" {
+	vpc_id = "${alicloud_vpc.foo.id}"
+	spec = "Small"
+	name = "test_foo"
+	bandwidth_packages = [{
+		ip_count = 1
+		bandwidth = 5
+		zone = "${data.alicloud_zones.default.zones.0.id}"
+	},{
+		ip_count = 1
+		bandwidth = 6
+		zone = "${data.alicloud_zones.default.zones.0.id}"
+	}]
+	depends_on = [
+		"alicloud_vswitch.foo"]
+}
+
+resource "alicloud_forward_entry" "foo"{
+	forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}"
+	external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
+	external_port = "80"
+	ip_protocol = "tcp"
+	internal_ip = "172.16.0.3"
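+	# Update step for "foo": only internal_port changes (8080 -> 8081), which
+	# exercises the in-place ModifyForwardEntry path instead of recreating the entry.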
+	internal_port = "8081"
+}
+
+
+resource "alicloud_forward_entry" "foo1"{
+	forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}"
+	external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
+	external_port = "22"
+	ip_protocol = "udp"
+	internal_ip = "172.16.0.4"
+	internal_port = "8080"
+}
+`
diff --git a/builtin/providers/alicloud/resource_alicloud_instance.go b/builtin/providers/alicloud/resource_alicloud_instance.go
index 36297afbe..492780917 100644
--- a/builtin/providers/alicloud/resource_alicloud_instance.go
+++ b/builtin/providers/alicloud/resource_alicloud_instance.go
@@ -6,11 +6,12 @@ import (
 	"encoding/base64"
 	"encoding/json"
 
-	"strings"
-
 	"github.com/denverdino/aliyungo/common"
 	"github.com/denverdino/aliyungo/ecs"
+	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/helper/schema"
+	"strings"
+	"time"
 )
 
 func resourceAliyunInstance() *schema.Resource {
@@ -194,19 +195,16 @@ func resourceAliyunInstanceCreate(d *schema.ResourceData, meta interface{}) erro
 	//d.Set("system_disk_category", d.Get("system_disk_category"))
 	//d.Set("system_disk_size", d.Get("system_disk_size"))
 
+	if err := allocateIpAndBandWidthRelative(d, meta); err != nil {
+		return fmt.Errorf("allocateIpAndBandWidthRelative err: %#v", err)
+	}
+
 	// after instance created, its status is pending,
 	// so we need to wait it become to stopped and then start it
 	if err := conn.WaitForInstance(d.Id(), ecs.Stopped, defaultTimeout); err != nil {
 		log.Printf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Stopped, err)
 	}
 
-	if d.Get("allocate_public_ip").(bool) {
-		_, err := conn.AllocatePublicIpAddress(d.Id())
-		if err != nil {
-			log.Printf("[DEBUG] AllocatePublicIpAddress for instance got error: %#v", err)
-		}
-	}
-
 	if err := conn.StartInstance(d.Id()); err != nil {
 		return fmt.Errorf("Start instance got error: %#v", err)
 	}
@@ -258,11 +256,12 @@ func resourceAliyunRunInstance(d *schema.ResourceData, meta interface{}) error {
 		log.Printf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Running, err)
 	}
 
-	if d.Get("allocate_public_ip").(bool) {
-		_, err := conn.AllocatePublicIpAddress(d.Id())
-		if err != nil {
-			log.Printf("[DEBUG] AllocatePublicIpAddress for instance got error: %#v", err)
-		}
+	if err := allocateIpAndBandWidthRelative(d, meta); err != nil {
+		return fmt.Errorf("allocateIpAndBandWidthRelative err: %#v", err)
+	}
+
+	if err := conn.WaitForInstanceAsyn(d.Id(), ecs.Running, defaultTimeout); err != nil {
+		log.Printf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Running, err)
 	}
 
 	return resourceAliyunInstanceUpdate(d, meta)
@@ -458,30 +457,47 @@ func resourceAliyunInstanceDelete(d *schema.ResourceData, meta interface{}) erro
 	client := meta.(*AliyunClient)
 	conn := client.ecsconn
 
-	instance, err := client.QueryInstancesById(d.Id())
-	if err != nil {
-		if notFoundError(err) {
-			return nil
-		}
-		return fmt.Errorf("Error DescribeInstanceAttribute: %#v", err)
-	}
-
-	if instance.Status != ecs.Stopped {
-		if err := conn.StopInstance(d.Id(), true); err != nil {
-			return err
+	return resource.Retry(5*time.Minute, func() *resource.RetryError {
+		instance, err := client.QueryInstancesById(d.Id())
+		if err != nil {
+			if notFoundError(err) {
+				return nil
+			}
+			// Any other error is fatal; do not fall through with a nil instance.
+			return resource.NonRetryableError(err)
+		}
+
+		if instance.Status != ecs.Stopped {
+			if err := conn.StopInstance(d.Id(), true); err != nil {
+				return resource.RetryableError(fmt.Errorf("ECS stop error - trying again."))
+			}
+
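+			// Wait (up to the provider's default timeout) for the instance to
+			// actually reach Stopped; the delete call below assumes a stopped instance.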
+			if err := conn.WaitForInstance(d.Id(), ecs.Stopped, defaultTimeout); err != nil {
+				return resource.RetryableError(fmt.Errorf("Waiting for ECS to stop timed out - trying again."))
+			}
+		}
+
+		if err := conn.DeleteInstance(d.Id()); err != nil {
+			return resource.RetryableError(fmt.Errorf("ECS Instance in use - trying again while it is deleted."))
+		}
+
+		return nil
+	})
+
+}
+
+func allocateIpAndBandWidthRelative(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AliyunClient).ecsconn
+	if d.Get("allocate_public_ip").(bool) {
+		if d.Get("internet_max_bandwidth_out").(int) == 0 {
+			return fmt.Errorf("Error: if allocate_public_ip is true then internet_max_bandwidth_out cannot be zero.")
+		}
+		_, err := conn.AllocatePublicIpAddress(d.Id())
+		if err != nil {
+			return fmt.Errorf("AllocatePublicIpAddress for instance got error: %#v", err)
+		}
+	}
+	return nil
-
-	if err := conn.DeleteInstance(d.Id()); err != nil {
-		return err
-	}
-	return nil
 }
+
 func buildAliyunRunInstancesArgs(d *schema.ResourceData, meta interface{}) (*ecs.RunInstanceArgs, error) {
 	args := &ecs.RunInstanceArgs{
 		MaxAmount: DEFAULT_INSTANCE_COUNT,
@@ -567,7 +583,6 @@ func buildAliyunInstanceArgs(d *schema.ResourceData, meta interface{}) (*ecs.Cre
 		args.Description = v
 	}
 
-	log.Printf("[DEBUG] SystemDisk is %d", systemDiskSize)
 	if v := d.Get("internet_charge_type").(string); v != "" {
 		args.InternetChargeType = common.InternetChargeType(v)
 	}
diff --git a/builtin/providers/alicloud/resource_alicloud_nat_gateway.go b/builtin/providers/alicloud/resource_alicloud_nat_gateway.go
index 99e71347a..7851e661c 100644
--- a/builtin/providers/alicloud/resource_alicloud_nat_gateway.go
+++ b/builtin/providers/alicloud/resource_alicloud_nat_gateway.go
@@ -8,6 +8,7 @@ import (
 	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/helper/schema"
 	"log"
+	"strconv"
 	"strings"
 	"time"
 )
@@ -44,6 +45,16 @@ func resourceAliyunNatGateway() *schema.Resource {
 				Computed: true,
 			},
 
+			"snat_table_ids": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"forward_table_ids": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
 			"bandwidth_packages": &schema.Schema{
 				Type: schema.TypeList,
 				Elem: &schema.Resource{
@@ -60,6 +71,10 @@ func resourceAliyunNatGateway() *schema.Resource {
 							Type:     schema.TypeString,
 							Optional: true,
 						},
+						"public_ip_addresses": &schema.Schema{
+							Type:     schema.TypeString,
+							Computed: true,
+						},
 					},
 				},
 				Required: true,
@@ -133,8 +148,16 @@ func resourceAliyunNatGatewayRead(d *schema.ResourceData, meta interface{}) erro
 	d.Set("name", natGateway.Name)
 	d.Set("spec", natGateway.Spec)
 	d.Set("bandwidth_package_ids", strings.Join(natGateway.BandwidthPackageIds.BandwidthPackageId, ","))
+	d.Set("snat_table_ids", strings.Join(natGateway.SnatTableIds.SnatTableId, ","))
+	d.Set("forward_table_ids", strings.Join(natGateway.ForwardTableIds.ForwardTableId, ","))
 	d.Set("description", natGateway.Description)
 	d.Set("vpc_id", natGateway.VpcId)
+	bindWidthPackages, err := flattenBandWidthPackages(natGateway.BandwidthPackageIds.BandwidthPackageId, meta, d)
+	if err != nil {
+		log.Printf("[ERROR] bindWidthPackages flattenBandWidthPackages failed.
natgateway id is %#v", d.Id()) + } else { + d.Set("bandwidth_packages", bindWidthPackages) + } return nil } @@ -254,7 +277,7 @@ func resourceAliyunNatGatewayDelete(d *schema.ResourceData, meta interface{}) er } args := &ecs.DeleteNatGatewayArgs{ - RegionId: client.Region, + RegionId: getRegion(d, meta), NatGatewayId: d.Id(), } @@ -267,7 +290,7 @@ func resourceAliyunNatGatewayDelete(d *schema.ResourceData, meta interface{}) er } describeArgs := &ecs.DescribeNatGatewaysArgs{ - RegionId: client.Region, + RegionId: getRegion(d, meta), NatGatewayId: d.Id(), } gw, _, gwErr := conn.DescribeNatGateways(describeArgs) @@ -282,3 +305,69 @@ func resourceAliyunNatGatewayDelete(d *schema.ResourceData, meta interface{}) er return resource.RetryableError(fmt.Errorf("NatGateway in use - trying again while it is deleted.")) }) } + +func flattenBandWidthPackages(bandWidthPackageIds []string, meta interface{}, d *schema.ResourceData) ([]map[string]interface{}, error) { + + packageLen := len(bandWidthPackageIds) + result := make([]map[string]interface{}, 0, packageLen) + + for i := packageLen - 1; i >= 0; i-- { + packageId := bandWidthPackageIds[i] + packages, err := getPackages(packageId, meta, d) + if err != nil { + log.Printf("[ERROR] NatGateways getPackages failed. packageId is %#v", packageId) + return result, err + } + ipAddress := flattenPackPublicIp(packages.PublicIpAddresses.PublicIpAddresse) + ipCont, ipContErr := strconv.Atoi(packages.IpCount) + bandWidth, bandWidthErr := strconv.Atoi(packages.Bandwidth) + if ipContErr != nil { + log.Printf("[ERROR] NatGateways getPackages failed: ipCont convert error. packageId is %#v", packageId) + return result, ipContErr + } + if bandWidthErr != nil { + log.Printf("[ERROR] NatGateways getPackages failed: bandWidthErr convert error. 
packageId is %#v", packageId) + return result, bandWidthErr + } + l := map[string]interface{}{ + "ip_count": ipCont, + "bandwidth": bandWidth, + "zone": packages.ZoneId, + "public_ip_addresses": ipAddress, + } + result = append(result, l) + } + return result, nil +} + +func getPackages(packageId string, meta interface{}, d *schema.ResourceData) (*ecs.DescribeBandwidthPackageType, error) { + client := meta.(*AliyunClient) + conn := client.vpcconn + packages, err := conn.DescribeBandwidthPackages(&ecs.DescribeBandwidthPackagesArgs{ + RegionId: getRegion(d, meta), + BandwidthPackageId: packageId, + }) + + if err != nil { + log.Printf("[ERROR] Describe bandwidth package is failed, BandwidthPackageId Id: %s", packageId) + return nil, err + } + + if len(packages) == 0 { + return nil, common.GetClientErrorFromString(InstanceNotfound) + } + + return &packages[0], nil + +} + +func flattenPackPublicIp(publicIpAddressList []ecs.PublicIpAddresseType) string { + var result []string + + for _, publicIpAddresses := range publicIpAddressList { + ipAddress := publicIpAddresses.IpAddress + result = append(result, ipAddress) + } + + return strings.Join(result, ",") +} diff --git a/builtin/providers/alicloud/resource_alicloud_nat_gateway_test.go b/builtin/providers/alicloud/resource_alicloud_nat_gateway_test.go index a928c5dc1..963be3cb1 100644 --- a/builtin/providers/alicloud/resource_alicloud_nat_gateway_test.go +++ b/builtin/providers/alicloud/resource_alicloud_nat_gateway_test.go @@ -48,6 +48,7 @@ func TestAccAlicloudNatGateway_basic(t *testing.T) { "alicloud_nat_gateway.foo", "name", "test_foo"), + testAccCheckNatgatewayIpAddress("alicloud_nat_gateway.foo", &nat), ), }, }, @@ -96,6 +97,31 @@ func TestAccAlicloudNatGateway_spec(t *testing.T) { } +func testAccCheckNatgatewayIpAddress(n string, nat *ecs.NatGatewaySetType) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No NatGateway ID is set") + } + + client := testAccProvider.Meta().(*AliyunClient) + natGateway, err := client.DescribeNatGateway(rs.Primary.ID) + + if err != nil { + return err + } + if natGateway == nil { + return fmt.Errorf("Natgateway not found") + } + + return nil + } +} + func testAccCheckNatGatewayExists(n string, nat *ecs.NatGatewaySetType) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -164,7 +190,7 @@ resource "alicloud_vpc" "foo" { resource "alicloud_vswitch" "foo" { vpc_id = "${alicloud_vpc.foo.id}" cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" + availability_zone = "${data.alicloud_zones.default.zones.2.id}" } resource "alicloud_nat_gateway" "foo" { @@ -174,11 +200,19 @@ resource "alicloud_nat_gateway" "foo" { bandwidth_packages = [{ ip_count = 1 bandwidth = 5 - zone = "${data.alicloud_zones.default.zones.0.id}" + zone = "${data.alicloud_zones.default.zones.2.id}" }, { ip_count = 2 - bandwidth = 10 - zone = "${data.alicloud_zones.default.zones.0.id}" + bandwidth = 6 + zone = "${data.alicloud_zones.default.zones.2.id}" + }, { + ip_count = 3 + bandwidth = 7 + zone = "${data.alicloud_zones.default.zones.2.id}" + }, { + ip_count = 1 + bandwidth = 8 + zone = "${data.alicloud_zones.default.zones.2.id}" }] depends_on = [ "alicloud_vswitch.foo"] diff --git a/builtin/providers/alicloud/resource_alicloud_security_group.go 
b/builtin/providers/alicloud/resource_alicloud_security_group.go index 5f85bfd29..b1d60f704 100644 --- a/builtin/providers/alicloud/resource_alicloud_security_group.go +++ b/builtin/providers/alicloud/resource_alicloud_security_group.go @@ -74,6 +74,11 @@ func resourceAliyunSecurityGroupRead(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error DescribeSecurityGroupAttribute: %#v", err) } + if sg == nil { + d.SetId("") + return nil + } + d.Set("name", sg.SecurityGroupName) d.Set("description", sg.Description) diff --git a/builtin/providers/alicloud/resource_alicloud_security_group_rule.go b/builtin/providers/alicloud/resource_alicloud_security_group_rule.go index c43db23a8..56e4de670 100644 --- a/builtin/providers/alicloud/resource_alicloud_security_group_rule.go +++ b/builtin/providers/alicloud/resource_alicloud_security_group_rule.go @@ -3,9 +3,10 @@ package alicloud import ( "fmt" "github.com/denverdino/aliyungo/ecs" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" - "log" "strings" + "time" ) func resourceAliyunSecurityGroupRule() *schema.Resource { @@ -141,7 +142,7 @@ func resourceAliyunSecurityGroupRuleRead(d *schema.ResourceData, meta interface{ } return fmt.Errorf("Error SecurityGroup rule: %#v", err) } - log.Printf("[WARN]sg %s, type %s, protocol %s, port %s, rule %#v", sgId, direction, ip_protocol, port_range, rule) + d.Set("type", rule.Direction) d.Set("ip_protocol", strings.ToLower(string(rule.IpProtocol))) d.Set("nic_type", rule.NicType) @@ -163,7 +164,7 @@ func resourceAliyunSecurityGroupRuleRead(d *schema.ResourceData, meta interface{ return nil } -func resourceAliyunSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}) error { +func deleteSecurityGroupRule(d *schema.ResourceData, meta interface{}) error { client := meta.(*AliyunClient) ruleType := d.Get("type").(string) @@ -187,6 +188,30 @@ func resourceAliyunSecurityGroupRuleDelete(d *schema.ResourceData, meta interfac AuthorizeSecurityGroupEgressArgs: *args, } return client.RevokeSecurityGroupEgress(revokeArgs) +} + +func resourceAliyunSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AliyunClient) + parts := strings.Split(d.Id(), ":") + sgId, direction, ip_protocol, port_range, nic_type := parts[0], parts[1], parts[2], parts[3], parts[4] + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + err := deleteSecurityGroupRule(d, meta) + + if err != nil { + resource.RetryableError(fmt.Errorf("Security group rule in use - trying again while it is deleted.")) + } + + _, err = client.DescribeSecurityGroupRule(sgId, direction, nic_type, ip_protocol, port_range) + if err != nil { + if notFoundError(err) { + return nil + } + return resource.NonRetryableError(err) + } + + return resource.RetryableError(fmt.Errorf("Security group rule in use - trying again while it is deleted.")) + }) } diff --git a/builtin/providers/alicloud/resource_alicloud_slb.go b/builtin/providers/alicloud/resource_alicloud_slb.go index f3d2af9d3..de8a8906d 100644 --- a/builtin/providers/alicloud/resource_alicloud_slb.go +++ b/builtin/providers/alicloud/resource_alicloud_slb.go @@ -281,6 +281,11 @@ func resourceAliyunSlbRead(d *schema.ResourceData, meta interface{}) error { return err } + if loadBalancer == nil { + d.SetId("") + return nil + } + d.Set("name", loadBalancer.LoadBalancerName) if loadBalancer.AddressType == slb.InternetAddressType { diff --git a/builtin/providers/alicloud/resource_alicloud_slb_attachment.go 
b/builtin/providers/alicloud/resource_alicloud_slb_attachment.go
index 6a9163c07..74e13c26c 100644
--- a/builtin/providers/alicloud/resource_alicloud_slb_attachment.go
+++ b/builtin/providers/alicloud/resource_alicloud_slb_attachment.go
@@ -64,10 +64,14 @@ func resourceAliyunSlbAttachmentRead(d *schema.ResourceData, meta interface{}) e
 	if err != nil {
 		if notFoundError(err) {
 			d.SetId("")
-			return fmt.Errorf("Read special SLB Id not found: %#v", err)
+			return nil
 		}
+		return fmt.Errorf("Read special SLB Id not found: %#v", err)
+	}
 
-		return err
+	if loadBalancer == nil {
+		d.SetId("")
+		return nil
 	}
 
 	backendServerType := loadBalancer.BackendServers
diff --git a/builtin/providers/alicloud/resource_alicloud_snat.go b/builtin/providers/alicloud/resource_alicloud_snat.go
new file mode 100644
index 000000000..887d50388
--- /dev/null
+++ b/builtin/providers/alicloud/resource_alicloud_snat.go
@@ -0,0 +1,134 @@
+package alicloud
+
+import (
+	"fmt"
+	"github.com/denverdino/aliyungo/ecs"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAliyunSnatEntry() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAliyunSnatEntryCreate,
+		Read:   resourceAliyunSnatEntryRead,
+		Update: resourceAliyunSnatEntryUpdate,
+		Delete: resourceAliyunSnatEntryDelete,
+
+		Schema: map[string]*schema.Schema{
+			"snat_table_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"source_vswitch_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"snat_ip": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+		},
+	}
+}
+
+func resourceAliyunSnatEntryCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AliyunClient).vpcconn
+
+	args := &ecs.CreateSnatEntryArgs{
+		RegionId:        getRegion(d, meta),
+		SnatTableId:     d.Get("snat_table_id").(string),
+		SourceVSwitchId: d.Get("source_vswitch_id").(string),
+		SnatIp:          d.Get("snat_ip").(string),
+	}
+
+	resp, err := conn.CreateSnatEntry(args)
+	if err != nil {
+		return fmt.Errorf("CreateSnatEntry got error: %#v", err)
+	}
+
+	d.SetId(resp.SnatEntryId)
+	d.Set("snat_table_id", d.Get("snat_table_id").(string))
+
+	return resourceAliyunSnatEntryRead(d, meta)
+}
+
+func resourceAliyunSnatEntryRead(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*AliyunClient)
+
+	snatEntry, err := client.DescribeSnatEntry(d.Get("snat_table_id").(string), d.Id())
+
+	if err != nil {
+		if notFoundError(err) {
+			d.SetId("")
+			return nil
+		}
+		return err
+	}
+
+	d.Set("snat_table_id", snatEntry.SnatTableId)
+	d.Set("source_vswitch_id", snatEntry.SourceVSwitchId)
+	d.Set("snat_ip", snatEntry.SnatIp)
+
+	return nil
+}
+
+func resourceAliyunSnatEntryUpdate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*AliyunClient)
+	conn := client.vpcconn
+
+	snatEntry, err := client.DescribeSnatEntry(d.Get("snat_table_id").(string), d.Id())
+	if err != nil {
+		return err
+	}
+
+	d.Partial(true)
+	attributeUpdate := false
+	args := &ecs.ModifySnatEntryArgs{
+		RegionId:    getRegion(d, meta),
+		SnatTableId: snatEntry.SnatTableId,
+		SnatEntryId: snatEntry.SnatEntryId,
+	}
+
+	if d.HasChange("snat_ip") {
+		d.SetPartial("snat_ip")
+		var snat_ip string
+		if v, ok := d.GetOk("snat_ip"); ok {
+			snat_ip = v.(string)
+		} else {
+			return fmt.Errorf("cannot change snat_ip to an empty string")
+		}
+		args.SnatIp = snat_ip
+
+		attributeUpdate = true
+	}
+
+	if attributeUpdate {
+		if err := conn.ModifySnatEntry(args); err != nil {
+			return err
+		}
+	}
+
+	d.Partial(false)
+
+	return
resourceAliyunSnatEntryRead(d, meta) +} + +func resourceAliyunSnatEntryDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AliyunClient) + conn := client.vpcconn + + snatEntryId := d.Id() + snatTableId := d.Get("snat_table_id").(string) + + args := &ecs.DeleteSnatEntryArgs{ + RegionId: getRegion(d, meta), + SnatTableId: snatTableId, + SnatEntryId: snatEntryId, + } + + if err := conn.DeleteSnatEntry(args); err != nil { + return err + } + + return nil +} diff --git a/builtin/providers/alicloud/resource_alicloud_snat_test.go b/builtin/providers/alicloud/resource_alicloud_snat_test.go new file mode 100644 index 000000000..673ff59dd --- /dev/null +++ b/builtin/providers/alicloud/resource_alicloud_snat_test.go @@ -0,0 +1,180 @@ +package alicloud + +import ( + "fmt" + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/ecs" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "testing" +) + +func TestAccAlicloudSnat_basic(t *testing.T) { + var snat ecs.SnatEntrySetType + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + + // module name + IDRefreshName: "alicloud_snat_entry.foo", + Providers: testAccProviders, + CheckDestroy: testAccCheckSnatEntryDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccSnatEntryConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckSnatEntryExists( + "alicloud_snat_entry.foo", &snat), + ), + }, + resource.TestStep{ + Config: testAccSnatEntryUpdate, + Check: resource.ComposeTestCheckFunc( + testAccCheckSnatEntryExists( + "alicloud_snat_entry.foo", &snat), + ), + }, + }, + }) + +} + +func testAccCheckSnatEntryDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*AliyunClient) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "alicloud_snat_entry" { + continue + } + + // Try to find the Snat entry + instance, err := client.DescribeSnatEntry(rs.Primary.Attributes["snat_table_id"], rs.Primary.ID) + + //this special deal cause the DescribeSnatEntry can't find the records would be throw "cant find the snatTable error" + if instance.SnatEntryId == "" { + return nil + } + + if instance.SnatEntryId != "" { + return fmt.Errorf("Snat entry still exist") + } + + if err != nil { + // Verify the error is what we want + e, _ := err.(*common.Error) + + if !notFoundError(e) { + return err + } + } + + } + + return nil +} + +func testAccCheckSnatEntryExists(n string, snat *ecs.SnatEntrySetType) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No SnatEntry ID is set") + } + + client := testAccProvider.Meta().(*AliyunClient) + instance, err := client.DescribeSnatEntry(rs.Primary.Attributes["snat_table_id"], rs.Primary.ID) + + if err != nil { + return err + } + if instance.SnatEntryId == "" { + return fmt.Errorf("SnatEntry not found") + } + + *snat = instance + return nil + } +} + +const testAccSnatEntryConfig = ` +data "alicloud_zones" "default" { + "available_resource_creation"= "VSwitch" +} + +resource "alicloud_vpc" "foo" { + name = "tf_test_foo" + cidr_block = "172.16.0.0/12" +} + +resource "alicloud_vswitch" "foo" { + vpc_id = "${alicloud_vpc.foo.id}" + cidr_block = "172.16.0.0/21" + availability_zone = "${data.alicloud_zones.default.zones.2.id}" +} + +resource "alicloud_nat_gateway" "foo" { + vpc_id = "${alicloud_vpc.foo.id}" + spec 
= "Small" + name = "test_foo" + bandwidth_packages = [{ + ip_count = 2 + bandwidth = 5 + zone = "${data.alicloud_zones.default.zones.2.id}" + },{ + ip_count = 1 + bandwidth = 6 + zone = "${data.alicloud_zones.default.zones.2.id}" + }] + depends_on = [ + "alicloud_vswitch.foo"] +} +resource "alicloud_snat_entry" "foo"{ + snat_table_id = "${alicloud_nat_gateway.foo.snat_table_ids}" + source_vswitch_id = "${alicloud_vswitch.foo.id}" + snat_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}" +} +` + +const testAccSnatEntryUpdate = ` +data "alicloud_zones" "default" { + "available_resource_creation"= "VSwitch" +} + +resource "alicloud_vpc" "foo" { + name = "tf_test_foo" + cidr_block = "172.16.0.0/12" +} + +resource "alicloud_vswitch" "foo" { + vpc_id = "${alicloud_vpc.foo.id}" + cidr_block = "172.16.0.0/21" + availability_zone = "${data.alicloud_zones.default.zones.2.id}" +} + +resource "alicloud_nat_gateway" "foo" { + vpc_id = "${alicloud_vpc.foo.id}" + spec = "Small" + name = "test_foo" + bandwidth_packages = [{ + ip_count = 2 + bandwidth = 5 + zone = "${data.alicloud_zones.default.zones.2.id}" + },{ + ip_count = 1 + bandwidth = 6 + zone = "${data.alicloud_zones.default.zones.2.id}" + }] + depends_on = [ + "alicloud_vswitch.foo"] +} +resource "alicloud_snat_entry" "foo"{ + snat_table_id = "${alicloud_nat_gateway.foo.snat_table_ids}" + source_vswitch_id = "${alicloud_vswitch.foo.id}" + snat_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.1.public_ip_addresses}" +} +` diff --git a/builtin/providers/alicloud/resource_alicloud_vpc.go b/builtin/providers/alicloud/resource_alicloud_vpc.go index e59e3b53b..7418395fd 100644 --- a/builtin/providers/alicloud/resource_alicloud_vpc.go +++ b/builtin/providers/alicloud/resource_alicloud_vpc.go @@ -86,7 +86,7 @@ func resourceAliyunVpcCreate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Timeout when WaitForVpcAvailable") } - return resourceAliyunVpcRead(d, meta) + return resourceAliyunVpcUpdate(d, meta) } func resourceAliyunVpcRead(d *schema.ResourceData, meta interface{}) error { @@ -144,7 +144,7 @@ func resourceAliyunVpcUpdate(d *schema.ResourceData, meta interface{}) error { d.Partial(false) - return nil + return resourceAliyunVpcRead(d, meta) } func resourceAliyunVpcDelete(d *schema.ResourceData, meta interface{}) error { diff --git a/builtin/providers/alicloud/resource_alicloud_vswitch.go b/builtin/providers/alicloud/resource_alicloud_vswitch.go index 74d4c6a88..d4149b9ca 100644 --- a/builtin/providers/alicloud/resource_alicloud_vswitch.go +++ b/builtin/providers/alicloud/resource_alicloud_vswitch.go @@ -68,7 +68,7 @@ func resourceAliyunSwitchCreate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("WaitForVSwitchAvailable got a error: %s", err) } - return resourceAliyunSwitchRead(d, meta) + return resourceAliyunSwitchUpdate(d, meta) } func resourceAliyunSwitchRead(d *schema.ResourceData, meta interface{}) error { @@ -139,7 +139,7 @@ func resourceAliyunSwitchUpdate(d *schema.ResourceData, meta interface{}) error d.Partial(false) - return nil + return resourceAliyunSwitchRead(d, meta) } func resourceAliyunSwitchDelete(d *schema.ResourceData, meta interface{}) error { diff --git a/builtin/providers/alicloud/service_alicloud_ecs.go b/builtin/providers/alicloud/service_alicloud_ecs.go index 4ff0e5f04..79b6b07fb 100644 --- a/builtin/providers/alicloud/service_alicloud_ecs.go +++ b/builtin/providers/alicloud/service_alicloud_ecs.go @@ -131,7 +131,7 @@ func (client *AliyunClient) 
QueryInstancesById(id string) (instance *ecs.Instanc } if len(instances) == 0 { - return nil, common.GetClientErrorFromString(InstanceNotfound) + return nil, GetNotFoundErrorFromString(InstanceNotfound) } return &instances[0], nil @@ -244,7 +244,7 @@ func (client *AliyunClient) DescribeSecurityGroupRule(securityGroupId, direction return &p, nil } } - return nil, nil + return nil, GetNotFoundErrorFromString("Security group rule not found") } diff --git a/builtin/providers/alicloud/service_alicloud_ess.go b/builtin/providers/alicloud/service_alicloud_ess.go new file mode 100644 index 000000000..69d514ef2 --- /dev/null +++ b/builtin/providers/alicloud/service_alicloud_ess.go @@ -0,0 +1,167 @@ +package alicloud + +import ( + "github.com/denverdino/aliyungo/ess" +) + +func (client *AliyunClient) DescribeScalingGroupById(sgId string) (*ess.ScalingGroupItemType, error) { + args := ess.DescribeScalingGroupsArgs{ + RegionId: client.Region, + ScalingGroupId: []string{sgId}, + } + + sgs, _, err := client.essconn.DescribeScalingGroups(&args) + if err != nil { + return nil, err + } + + if len(sgs) == 0 { + return nil, GetNotFoundErrorFromString("Scaling group not found") + } + + return &sgs[0], nil +} + +func (client *AliyunClient) DeleteScalingGroupById(sgId string) error { + args := ess.DeleteScalingGroupArgs{ + ScalingGroupId: sgId, + ForceDelete: true, + } + + _, err := client.essconn.DeleteScalingGroup(&args) + return err +} + +func (client *AliyunClient) DescribeScalingConfigurationById(sgId, configId string) (*ess.ScalingConfigurationItemType, error) { + args := ess.DescribeScalingConfigurationsArgs{ + RegionId: client.Region, + ScalingGroupId: sgId, + ScalingConfigurationId: []string{configId}, + } + + cs, _, err := client.essconn.DescribeScalingConfigurations(&args) + if err != nil { + return nil, err + } + + if len(cs) == 0 { + return nil, GetNotFoundErrorFromString("Scaling configuration not found") + } + + return &cs[0], nil +} + +func (client *AliyunClient) ActiveScalingConfigurationById(sgId, configId string) error { + args := ess.ModifyScalingGroupArgs{ + ScalingGroupId: sgId, + ActiveScalingConfigurationId: configId, + } + + _, err := client.essconn.ModifyScalingGroup(&args) + return err +} + +func (client *AliyunClient) EnableScalingConfigurationById(sgId, configId string, ids []string) error { + args := ess.EnableScalingGroupArgs{ + ScalingGroupId: sgId, + ActiveScalingConfigurationId: configId, + } + + if len(ids) > 0 { + args.InstanceId = ids + } + + _, err := client.essconn.EnableScalingGroup(&args) + return err +} + +func (client *AliyunClient) DisableScalingConfigurationById(sgId string) error { + args := ess.DisableScalingGroupArgs{ + ScalingGroupId: sgId, + } + + _, err := client.essconn.DisableScalingGroup(&args) + return err +} + +func (client *AliyunClient) DeleteScalingConfigurationById(sgId, configId string) error { + args := ess.DeleteScalingConfigurationArgs{ + ScalingGroupId: sgId, + ScalingConfigurationId: configId, + } + + _, err := client.essconn.DeleteScalingConfiguration(&args) + return err +} + +// Flattens an array of datadisk into a []map[string]interface{} +func flattenDataDiskMappings(list []ess.DataDiskItemType) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(list)) + for _, i := range list { + l := map[string]interface{}{ + "size": i.Size, + "category": i.Category, + "snapshot_id": i.SnapshotId, + "device": i.Device, + } + result = append(result, l) + } + return result +} + +func (client *AliyunClient) 
DescribeScalingRuleById(sgId, ruleId string) (*ess.ScalingRuleItemType, error) { + args := ess.DescribeScalingRulesArgs{ + RegionId: client.Region, + ScalingGroupId: sgId, + ScalingRuleId: []string{ruleId}, + } + + cs, _, err := client.essconn.DescribeScalingRules(&args) + if err != nil { + return nil, err + } + + if len(cs) == 0 { + return nil, GetNotFoundErrorFromString("Scaling rule not found") + } + + return &cs[0], nil +} + +func (client *AliyunClient) DeleteScalingRuleById(ruleId string) error { + args := ess.DeleteScalingRuleArgs{ + RegionId: client.Region, + ScalingRuleId: ruleId, + } + + _, err := client.essconn.DeleteScalingRule(&args) + return err +} + +func (client *AliyunClient) DescribeScheduleById(scheduleId string) (*ess.ScheduledTaskItemType, error) { + args := ess.DescribeScheduledTasksArgs{ + RegionId: client.Region, + ScheduledTaskId: []string{scheduleId}, + } + + cs, _, err := client.essconn.DescribeScheduledTasks(&args) + if err != nil { + return nil, err + } + + if len(cs) == 0 { + return nil, GetNotFoundErrorFromString("Schedule not found") + } + + return &cs[0], nil +} + +func (client *AliyunClient) DeleteScheduleById(scheduleId string) error { + args := ess.DeleteScheduledTaskArgs{ + RegionId: client.Region, + ScheduledTaskId: scheduleId, + } + + _, err := client.essconn.DeleteScheduledTask(&args) + return err +} diff --git a/builtin/providers/alicloud/service_alicloud_rds.go b/builtin/providers/alicloud/service_alicloud_rds.go index 903374fe6..700a5d138 100644 --- a/builtin/providers/alicloud/service_alicloud_rds.go +++ b/builtin/providers/alicloud/service_alicloud_rds.go @@ -6,7 +6,20 @@ import ( "strings" ) -// when getInstance is empty, then throw InstanceNotfound error +// +// _______________ _______________ _______________ +// | | ______param______\ | | _____request_____\ | | +// | Business | | Service | | SDK/API | +// | | __________________ | | __________________ | | +// |______________| \ (obj, err) |______________| \ (status, cont) |______________| +// | | +// |A. {instance, nil} |a. {200, content} +// |B. {nil, error} |b. {200, nil} +// |c. {4xx, nil} +// +// The API return 200 for resource not found. +// When getInstance is empty, then throw InstanceNotfound error. +// That the business layer only need to check error. 
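+//
+// A typical caller then needs only one error check (a sketch; notFoundError is
+// the helper this provider already uses for exactly this purpose):
+//
+//	instance, err := client.DescribeDBInstanceById(id)
+//	if err != nil {
+//		if notFoundError(err) {
+//			d.SetId("") // gone outside Terraform; drop it from state
+//			return nil
+//		}
+//		return err
+//	}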
func (client *AliyunClient) DescribeDBInstanceById(id string) (instance *rds.DBInstanceAttribute, err error) {
 	arrtArgs := rds.DescribeDBInstancesArgs{
 		DBInstanceId: id,
 	}
@@ -19,7 +32,7 @@ func (client *AliyunClient) DescribeDBInstanceById(id string) (instance *rds.DBI
 	attr := resp.Items.DBInstanceAttribute
 
 	if len(attr) <= 0 {
-		return nil, common.GetClientErrorFromString(InstanceNotfound)
+		return nil, GetNotFoundErrorFromString("DB instance not found")
 	}
 
 	return &attr[0], nil
@@ -164,13 +177,10 @@ func (client *AliyunClient) GetSecurityIps(instanceId string) ([]string, error)
 	if err != nil {
 		return nil, err
 	}
-	ips := ""
-	for i, ip := range arr {
-		if i == 0 {
-			ips += ip.SecurityIPList
-		} else {
-			ips += COMMA_SEPARATED + ip.SecurityIPList
-		}
+	var ips, separator string
+	for _, ip := range arr {
+		ips += separator + ip.SecurityIPList
+		separator = COMMA_SEPARATED
 	}
 	return strings.Split(ips, COMMA_SEPARATED), nil
 }
diff --git a/builtin/providers/alicloud/service_alicloud_vpc.go b/builtin/providers/alicloud/service_alicloud_vpc.go
index 775fe112c..491ab034f 100644
--- a/builtin/providers/alicloud/service_alicloud_vpc.go
+++ b/builtin/providers/alicloud/service_alicloud_vpc.go
@@ -64,6 +64,80 @@ func (client *AliyunClient) DescribeVpc(vpcId string) (*ecs.VpcSetType, error) {
 	return &vpcs[0], nil
 }
 
+func (client *AliyunClient) DescribeSnatEntry(snatTableId string, snatEntryId string) (ecs.SnatEntrySetType, error) {
+
+	var resultSnat ecs.SnatEntrySetType
+
+	args := &ecs.DescribeSnatTableEntriesArgs{
+		RegionId:    client.Region,
+		SnatTableId: snatTableId,
+	}
+
+	snatEntries, _, err := client.vpcconn.DescribeSnatTableEntries(args)
+
+	// Work around an API quirk: describing an empty snat table fails with a
+	// "can't find the snat table" error instead of returning an empty list,
+	// so treat an empty result as "not found" before looking at err.
+	if len(snatEntries) == 0 {
+		return resultSnat, common.GetClientErrorFromString(InstanceNotfound)
+	}
+
+	if err != nil {
+		return resultSnat, err
+	}
+
+	findSnat := false
+
+	for _, snat := range snatEntries {
+		if snat.SnatEntryId == snatEntryId {
+			resultSnat = snat
+			findSnat = true
+		}
+	}
+	if !findSnat {
+		return resultSnat, common.GetClientErrorFromString(NotFindSnatEntryBySnatId)
+	}
+
+	return resultSnat, nil
+}
+
+func (client *AliyunClient) DescribeForwardEntry(forwardTableId string, forwardEntryId string) (ecs.ForwardTableEntrySetType, error) {
+
+	var resultForward ecs.ForwardTableEntrySetType
+
+	args := &ecs.DescribeForwardTableEntriesArgs{
+		RegionId:       client.Region,
+		ForwardTableId: forwardTableId,
+	}
+
+	forwardEntries, _, err := client.vpcconn.DescribeForwardTableEntries(args)
+
+	// Same quirk as DescribeSnatEntry: an empty forward table surfaces as a
+	// "can't find the forward table" error, so check the length before err.
+	if len(forwardEntries) == 0 {
+		return resultForward, common.GetClientErrorFromString(InstanceNotfound)
+	}
+
+	if err != nil {
+		return resultForward, err
+	}
+
+	findForward := false
+
+	for _, forward := range forwardEntries {
+		if forward.ForwardEntryId == forwardEntryId {
+			resultForward = forward
+			findForward = true
+		}
+	}
+	if !findForward {
+		return resultForward, common.GetClientErrorFromString(NotFindForwardEntryByForwardId)
+	}
+
+	return resultForward, nil
+}
+
 // describe vswitch by
param filters func (client *AliyunClient) QueryVswitches(args *ecs.DescribeVSwitchesArgs) (vswitches []ecs.VSwitchSetType, err error) { vsws, _, err := client.ecsconn.DescribeVSwitches(args) @@ -130,7 +203,7 @@ func (client *AliyunClient) QueryRouteEntry(routeTableId, cidrBlock, nextHopType return &e, nil } } - return nil, nil + return nil, GetNotFoundErrorFromString("Vpc router entry not found") } func (client *AliyunClient) GetVpcIdByVSwitchId(vswitchId string) (vpcId string, err error) { diff --git a/builtin/providers/alicloud/struct_security_groups.go b/builtin/providers/alicloud/struct_security_groups.go deleted file mode 100644 index 678f68f7d..000000000 --- a/builtin/providers/alicloud/struct_security_groups.go +++ /dev/null @@ -1,11 +0,0 @@ -package alicloud - -// Takes the result of flatmap.Expand for an array of strings -// and returns a []string -func expandStringList(configured []interface{}) []string { - vs := make([]string, 0, len(configured)) - for _, v := range configured { - vs = append(vs, v.(string)) - } - return vs -} diff --git a/builtin/providers/alicloud/validators.go b/builtin/providers/alicloud/validators.go index 9687e68e8..4c3c82f3e 100644 --- a/builtin/providers/alicloud/validators.go +++ b/builtin/providers/alicloud/validators.go @@ -18,7 +18,7 @@ func validateInstancePort(v interface{}, k string) (ws []string, errors []error) value := v.(int) if value < 1 || value > 65535 { errors = append(errors, fmt.Errorf( - "%q must be a valid instance port between 1 and 65535", + "%q must be a valid port between 1 and 65535", k)) return } @@ -26,8 +26,8 @@ func validateInstancePort(v interface{}, k string) (ws []string, errors []error) } func validateInstanceProtocol(v interface{}, k string) (ws []string, errors []error) { - protocal := v.(string) - if !isProtocalValid(protocal) { + protocol := v.(string) + if !isProtocolValid(protocol) { errors = append(errors, fmt.Errorf( "%q is an invalid value. 
Valid values are either http, https, tcp or udp", k)) @@ -282,9 +282,9 @@ func validateInternetChargeType(v interface{}, k string) (ws []string, errors [] func validateInternetMaxBandWidthOut(v interface{}, k string) (ws []string, errors []error) { value := v.(int) - if value < 1 || value > 100 { + if value < 0 || value > 100 { errors = append(errors, fmt.Errorf( - "%q must be a valid internet bandwidth out between 1 and 1000", + "%q must be a valid internet bandwidth out between 0 and 100", k)) return } @@ -565,3 +565,14 @@ func validateRegion(v interface{}, k string) (ws []string, errors []error) { } return } + +func validateForwardPort(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "any" { + valueConv, err := strconv.Atoi(value) + if err != nil || valueConv < 1 || valueConv > 65535 { + errors = append(errors, fmt.Errorf("%q must be a valid port between 1 and 65535 or any ", k)) + } + } + return +} diff --git a/builtin/providers/alicloud/validators_test.go b/builtin/providers/alicloud/validators_test.go index 7d40de6b7..3160c496c 100644 --- a/builtin/providers/alicloud/validators_test.go +++ b/builtin/providers/alicloud/validators_test.go @@ -21,17 +21,17 @@ func TestValidateInstancePort(t *testing.T) { } func TestValidateInstanceProtocol(t *testing.T) { - validProtocals := []string{"http", "tcp", "https", "udp"} - for _, v := range validProtocals { - _, errors := validateInstanceProtocol(v, "instance_protocal") + validProtocols := []string{"http", "tcp", "https", "udp"} + for _, v := range validProtocols { + _, errors := validateInstanceProtocol(v, "instance_protocol") if len(errors) != 0 { t.Fatalf("%q should be a valid instance protocol: %q", v, errors) } } - invalidProtocals := []string{"HTTP", "abc", "ecmp", "dubbo"} - for _, v := range invalidProtocals { - _, errors := validateInstanceProtocol(v, "instance_protocal") + invalidProtocols := []string{"HTTP", "abc", "ecmp", "dubbo"} + for _, v := range invalidProtocols { + _, errors := validateInstanceProtocol(v, "instance_protocol") if len(errors) == 0 { t.Fatalf("%q should be an invalid instance protocol", v) } @@ -353,7 +353,7 @@ func TestValidateInternetMaxBandWidthOut(t *testing.T) { } } - invalidInternetMaxBandWidthOut := []int{-2, 0, 101, 123} + invalidInternetMaxBandWidthOut := []int{-2, 101, 123} for _, v := range invalidInternetMaxBandWidthOut { _, errors := validateInternetMaxBandWidthOut(v, "internet_max_bandwidth_out") if len(errors) == 0 { diff --git a/examples/alicloud-ess-scaling/README.md b/examples/alicloud-ess-scaling/README.md new file mode 100644 index 000000000..3d9d2abb3 --- /dev/null +++ b/examples/alicloud-ess-scaling/README.md @@ -0,0 +1,17 @@ +### ESS scaling configuration Example + +The example launches ESS scaling configuration, will create ECS instance automatic by system schedule. 
+ +### Get up and running + +* Planning phase + + terraform plan + +* Apply phase + + terraform apply + +* Destroy + + terraform destroy \ No newline at end of file diff --git a/examples/alicloud-ess-scaling/main.tf b/examples/alicloud-ess-scaling/main.tf new file mode 100644 index 000000000..0f7575bc9 --- /dev/null +++ b/examples/alicloud-ess-scaling/main.tf @@ -0,0 +1,38 @@ +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "sg" { + name = "${var.security_group_name}" + description = "tf-sg" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.sg.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_ess_scaling_group" "scaling" { + min_size = "${var.scaling_min_size}" + max_size = "${var.scaling_max_size}" + scaling_group_name = "tf-scaling" + removal_policies = "${var.removal_policies}" + +} + +resource "alicloud_ess_scaling_configuration" "config" { + scaling_group_id = "${alicloud_ess_scaling_group.scaling.id}" + enable = "${var.enable}" + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "${var.ecs_instance_type}" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.sg.id}" +} \ No newline at end of file diff --git a/examples/alicloud-ess-scaling/outputs.tf b/examples/alicloud-ess-scaling/outputs.tf new file mode 100644 index 000000000..c4bfbb73e --- /dev/null +++ b/examples/alicloud-ess-scaling/outputs.tf @@ -0,0 +1,7 @@ +output "scaling_group_id" { + value = "${alicloud_ess_scaling_group.scaling.id}" +} + +output "configuration_id" { + value = "${alicloud_ess_scaling_configuration.config.id}" +} \ No newline at end of file diff --git a/examples/alicloud-ess-scaling/variables.tf b/examples/alicloud-ess-scaling/variables.tf new file mode 100644 index 000000000..11d2ef567 --- /dev/null +++ b/examples/alicloud-ess-scaling/variables.tf @@ -0,0 +1,24 @@ +variable "security_group_name" { + default = "tf-sg" +} + +variable "scaling_min_size" { + default = 1 +} + +variable "scaling_max_size" { + default = 1 +} + +variable "enable" { + default = true +} + +variable "removal_policies" { + type = "list" + default = ["OldestInstance", "NewestInstance"] +} + +variable "ecs_instance_type" { + default = "ecs.s2.large" +} \ No newline at end of file diff --git a/examples/alicloud-ess-schedule/README.md b/examples/alicloud-ess-schedule/README.md new file mode 100644 index 000000000..e606951b1 --- /dev/null +++ b/examples/alicloud-ess-schedule/README.md @@ -0,0 +1,17 @@ +### ESS scaling schedule Example + +The example launches ESS schedule task, which will create ECS by the schedule time. 
+ +### Get up and running + +* Planning phase + + terraform plan + +* Apply phase + + terraform apply + +* Destroy + + terraform destroy \ No newline at end of file diff --git a/examples/alicloud-ess-schedule/main.tf b/examples/alicloud-ess-schedule/main.tf new file mode 100644 index 000000000..339ba670b --- /dev/null +++ b/examples/alicloud-ess-schedule/main.tf @@ -0,0 +1,51 @@ +data "alicloud_images" "ecs_image" { + most_recent = true + name_regex = "^centos_6\\w{1,5}[64].*" +} + +resource "alicloud_security_group" "sg" { + name = "${var.security_group_name}" + description = "tf-sg" +} + +resource "alicloud_security_group_rule" "ssh-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "internet" + policy = "accept" + port_range = "22/22" + priority = 1 + security_group_id = "${alicloud_security_group.sg.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_ess_scaling_group" "scaling" { + min_size = "${var.scaling_min_size}" + max_size = "${var.scaling_max_size}" + scaling_group_name = "tf-scaling" + removal_policies = "${var.removal_policies}" + +} + +resource "alicloud_ess_scaling_configuration" "config" { + scaling_group_id = "${alicloud_ess_scaling_group.scaling.id}" + enable = "${var.enable}" + + image_id = "${data.alicloud_images.ecs_image.images.0.id}" + instance_type = "${var.ecs_instance_type}" + io_optimized = "optimized" + security_group_id = "${alicloud_security_group.sg.id}" +} + +resource "alicloud_ess_scaling_rule" "rule" { + scaling_group_id = "${alicloud_ess_scaling_group.scaling.id}" + adjustment_type = "TotalCapacity" + adjustment_value = "${var.rule_adjust_size}" + cooldown = 60 +} + +resource "alicloud_ess_schedule" "run" { + scheduled_action = "${alicloud_ess_scaling_rule.rule.ari}" + launch_time = "${var.schedule_launch_time}" + scheduled_task_name = "tf-run" +} \ No newline at end of file diff --git a/examples/alicloud-ess-schedule/outputs.tf b/examples/alicloud-ess-schedule/outputs.tf new file mode 100644 index 000000000..1d48aabfd --- /dev/null +++ b/examples/alicloud-ess-schedule/outputs.tf @@ -0,0 +1,11 @@ +output "scaling_group_id" { + value = "${alicloud_ess_scaling_group.scaling.id}" +} + +output "configuration_id" { + value = "${alicloud_ess_scaling_configuration.config.id}" +} + +output "configuration_ari" { + value = "${alicloud_ess_scaling_configuration.config.ari}" +} \ No newline at end of file diff --git a/examples/alicloud-ess-schedule/variables.tf b/examples/alicloud-ess-schedule/variables.tf new file mode 100644 index 000000000..fb95b6bee --- /dev/null +++ b/examples/alicloud-ess-schedule/variables.tf @@ -0,0 +1,32 @@ +variable "security_group_name" { + default = "tf-sg" +} + +variable "scaling_min_size" { + default = 1 +} + +variable "scaling_max_size" { + default = 1 +} + +variable "enable" { + default = true +} + +variable "removal_policies" { + type = "list" + default = ["OldestInstance", "NewestInstance"] +} + +variable "ecs_instance_type" { + default = "ecs.s2.large" +} + +variable "rule_adjust_size" { + default = 3 +} + +variable "schedule_launch_time" { + default = "2017-04-01T01:59Z" +} \ No newline at end of file diff --git a/examples/alicloud-vpc-snat/main.tf b/examples/alicloud-vpc-snat/main.tf new file mode 100644 index 000000000..1f204432a --- /dev/null +++ b/examples/alicloud-vpc-snat/main.tf @@ -0,0 +1,87 @@ +provider "alicloud" { + region = "cn-hangzhou" +} + +data "alicloud_instance_types" "1c2g" { + cpu_core_count = 1 + memory_size = 2 + instance_type_family = "ecs.n1" +} + +data "alicloud_zones" "default" { + 
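+  # Pick a zone that offers both the instance type and the disk category chosen above.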
"available_instance_type"= "${data.alicloud_instance_types.1c2g.instance_types.0.id}" + "available_disk_category"= "${var.disk_category}" +} + +resource "alicloud_vpc" "default" { + name = "tf_vpc" + cidr_block = "${var.vpc_cidr}" +} + +resource "alicloud_vswitch" "default" { + vpc_id = "${alicloud_vpc.default.id}" + cidr_block = "${var.vswitch_cidr}" + availability_zone = "${data.alicloud_zones.default.zones.0.id}" +} + +resource "alicloud_nat_gateway" "default" { + vpc_id = "${alicloud_vpc.default.id}" + spec = "Small" + name = "test_foo" + bandwidth_packages = [{ + ip_count = 2 + bandwidth = 5 + zone = "${data.alicloud_zones.default.zones.0.id}" + }] + depends_on = [ + "alicloud_vswitch.default"] +} +resource "alicloud_snat_entry" "default"{ + snat_table_id = "${alicloud_nat_gateway.default.snat_table_ids}" + source_vswitch_id = "${alicloud_vswitch.default.id}" + snat_ip = "${element(split(",", alicloud_nat_gateway.default.bandwidth_packages.0.public_ip_addresses),0)}" +} + +resource "alicloud_forward_entry" "default"{ + forward_table_id = "${alicloud_nat_gateway.default.forward_table_ids}" + external_ip = "${element(split(",", alicloud_nat_gateway.default.bandwidth_packages.0.public_ip_addresses),1)}" + external_port = "80" + ip_protocol = "tcp" + internal_ip = "${alicloud_instance.default.private_ip}" + internal_port = "8080" +} + +resource "alicloud_security_group" "sg" { + name = "tf_sg" + description = "tf_sg" + vpc_id = "${alicloud_vpc.default.id}" +} + +resource "alicloud_security_group_rule" "http-in" { + type = "ingress" + ip_protocol = "tcp" + nic_type = "intranet" + policy = "accept" + port_range = "80/80" + priority = 1 + security_group_id = "${alicloud_security_group.sg.id}" + cidr_ip = "0.0.0.0/0" +} + +resource "alicloud_instance" "default" { + # cn-beijing + availability_zone = "${data.alicloud_zones.default.zones.0.id}" + security_groups = ["${alicloud_security_group.sg.id}"] + + vswitch_id = "${alicloud_vswitch.default.id}" + + # series II + instance_charge_type = "PostPaid" + instance_type = "${var.instance_type}" + internet_max_bandwidth_out = 0 + io_optimized = "${var.io_optimized}" + + system_disk_category = "cloud_efficiency" + image_id = "${var.image_id}" + instance_name = "tf_vpc_snat" +} \ No newline at end of file diff --git a/examples/alicloud-vpc-snat/ouputs.tf b/examples/alicloud-vpc-snat/ouputs.tf new file mode 100644 index 000000000..f9a55914f --- /dev/null +++ b/examples/alicloud-vpc-snat/ouputs.tf @@ -0,0 +1,7 @@ +output "instance_id" { + value = "${alicloud_instance.default.id}" +} + +output "bindwidth_package_ip" { + value = "${alicloud_nat_gateway.default.bandwidth_packages.0.public_ip_addresses}" +} diff --git a/examples/alicloud-vpc-snat/variables.tf b/examples/alicloud-vpc-snat/variables.tf new file mode 100644 index 000000000..9e9eb76db --- /dev/null +++ b/examples/alicloud-vpc-snat/variables.tf @@ -0,0 +1,22 @@ + +variable "vpc_cidr" { + default = "10.1.0.0/21" +} +variable "vswitch_cidr" { + default = "10.1.1.0/24" +} +variable "rule_policy" { + default = "accept" +} +variable "instance_type" { + default = "ecs.n1.small" +} +variable "image_id" { + default = "ubuntu_140405_64_40G_cloudinit_20161115.vhd" +} +variable "io_optimized" { + default = "optimized" +} +variable "disk_category"{ + default = "cloud_efficiency" +} \ No newline at end of file diff --git a/vendor/github.com/denverdino/aliyungo/common/client.go b/vendor/github.com/denverdino/aliyungo/common/client.go index 69a9c3d1e..d186ebd82 100755 --- 
a/vendor/github.com/denverdino/aliyungo/common/client.go +++ b/vendor/github.com/denverdino/aliyungo/common/client.go @@ -13,6 +13,14 @@ import ( "github.com/denverdino/aliyungo/util" ) +// RemovalPolicy.N add index to array item +// RemovalPolicy=["a", "b"] => RemovalPolicy.1="a" RemovalPolicy.2="b" +type FlattenArray []string + +// string contains underline which will be replaced with dot +// SystemDisk_Category => SystemDisk.Category +type UnderlineString string + // A Client represents a client of ECS services type Client struct { AccessKeyId string //Access Key Id @@ -167,6 +175,75 @@ func (client *Client) Invoke(action string, args interface{}, response interface return nil } +// Invoke sends the raw HTTP request for ECS services +func (client *Client) InvokeByFlattenMethod(action string, args interface{}, response interface{}) error { + + request := Request{} + request.init(client.version, action, client.AccessKeyId) + + query := util.ConvertToQueryValues(request) + + util.SetQueryValueByFlattenMethod(args, &query) + + // Sign request + signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret) + + // Generate the request URL + requestURL := client.endpoint + "?" + query.Encode() + "&Signature=" + url.QueryEscape(signature) + + httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil) + + if err != nil { + return GetClientError(err) + } + + // TODO move to util and add build val flag + httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) + + t0 := time.Now() + httpResp, err := client.httpClient.Do(httpReq) + t1 := time.Now() + if err != nil { + return GetClientError(err) + } + statusCode := httpResp.StatusCode + + if client.debug { + log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0)) + } + + defer httpResp.Body.Close() + body, err := ioutil.ReadAll(httpResp.Body) + + if err != nil { + return GetClientError(err) + } + + if client.debug { + var prettyJSON bytes.Buffer + err = json.Indent(&prettyJSON, body, "", " ") + log.Println(string(prettyJSON.Bytes())) + } + + if statusCode >= 400 && statusCode <= 599 { + errorResponse := ErrorResponse{} + err = json.Unmarshal(body, &errorResponse) + ecsError := &Error{ + ErrorResponse: errorResponse, + StatusCode: statusCode, + } + return ecsError + } + + err = json.Unmarshal(body, response) + //log.Printf("%++v", response) + if err != nil { + return GetClientError(err) + } + + return nil +} + // Invoke sends the raw HTTP request for ECS services //改进了一下上面那个方法,可以使用各种Http方法 //2017.1.30 增加了一个path参数,用来拓展访问的地址 diff --git a/vendor/github.com/denverdino/aliyungo/common/endpoints.xml b/vendor/github.com/denverdino/aliyungo/common/endpoints.xml index 8e781ac46..4079bcd2b 100644 --- a/vendor/github.com/denverdino/aliyungo/common/endpoints.xml +++ b/vendor/github.com/denverdino/aliyungo/common/endpoints.xml @@ -32,7 +32,6 @@ Smssms.aliyuncs.com Jaqjaq.aliyuncs.com HPChpc.aliyuncs.com - Kmskms.cn-hangzhou.aliyuncs.com Locationlocation.aliyuncs.com ChargingServicechargingservice.aliyuncs.com Msgmsg-inner.aliyuncs.com @@ -63,11 +62,9 @@ PTSpts.aliyuncs.com Qualitycheckqualitycheck.aliyuncs.com M-kvstorem-kvstore.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com HighDDosyd-highddos-cn-hangzhou.aliyuncs.com CmsSiteMonitorsitemonitor.aliyuncs.com Rdsrds.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com BatchComputebatchCompute.aliyuncs.com CFcf.aliyuncs.com Drdsdrds.aliyuncs.com @@ -127,7 +124,7 @@ Smssms.aliyuncs.com Jaqjaq.aliyuncs.com 
CScs.aliyuncs.com - Kmskms.cn-hangzhou.aliyuncs.com + Kmskms.cn-hongkong.aliyuncs.com Locationlocation.aliyuncs.com Msgmsg-inner.aliyuncs.com ChargingServicechargingservice.aliyuncs.com @@ -158,11 +155,11 @@ Qualitycheckqualitycheck.aliyuncs.com Bssbss.aliyuncs.com Ubsmsubsms.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com + CloudAPIapigateway.cn-hongkong.aliyuncs.com Stssts.aliyuncs.com CmsSiteMonitorsitemonitor.aliyuncs.com Aceace.cn-hangzhou.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com + Mtsmts.cn-hongkong.aliyuncs.com Location-innerlocation-inner.aliyuncs.com CFcf.aliyuncs.com Acsacs.aliyun-inc.com @@ -235,7 +232,6 @@ Smssms.aliyuncs.com Drdsdrds.aliyuncs.com HPChpc.aliyuncs.com - Kmskms.cn-hangzhou.aliyuncs.com Locationlocation.aliyuncs.com Msgmsg-inner.aliyuncs.com ChargingServicechargingservice.aliyuncs.com @@ -265,9 +261,8 @@ Qualitycheckqualitycheck.aliyuncs.com Bssbss.aliyuncs.com M-kvstorem-kvstore.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com Aceace.cn-hangzhou.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com + Mtsmts.cn-qingdao.aliyuncs.com CFcf.aliyuncs.com Httpdnshttpdns-api.aliyuncs.com Location-innerlocation-inner.aliyuncs.com @@ -330,6 +325,7 @@ cn-shanghai + ARMSarms.cn-shanghai.aliyuncs.com Riskrisk-cn-hangzhou.aliyuncs.com COScos.aliyuncs.com HPChpc.aliyuncs.com @@ -371,11 +367,11 @@ Qualitycheckqualitycheck.aliyuncs.com M-kvstorem-kvstore.aliyuncs.com Apigatewayapigateway.cn-shanghai.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com + CloudAPIapigateway.cn-shanghai.aliyuncs.com Stssts.aliyuncs.com Vpcvpc.aliyuncs.com Aceace.cn-hangzhou.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com + Mtsmts.cn-shanghai.aliyuncs.com Ddsmongodb.aliyuncs.com CFcf.aliyuncs.com Acsacs.aliyun-inc.com @@ -403,6 +399,7 @@ Essess.aliyuncs.com Ossoss-cn-shanghai.aliyuncs.com YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + vodvod.cn-shanghai.aliyuncs.com @@ -419,7 +416,6 @@ Smssms.aliyuncs.com Salessales.cn-hangzhou.aliyuncs.com HPChpc.aliyuncs.com - Kmskms.cn-hangzhou.aliyuncs.com Locationlocation.aliyuncs.com Msgmsg-inner.aliyuncs.com ChargingServicechargingservice.aliyuncs.com @@ -447,10 +443,9 @@ PTSpts.aliyuncs.com Qualitycheckqualitycheck.aliyuncs.com M-kvstorem-kvstore.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com Stssts.aliyuncs.com Aceace.cn-hangzhou.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com + Mtsmts.cn-shenzhen.aliyuncs.com CFcf.aliyuncs.com Httpdnshttpdns-api.aliyuncs.com Greengreen.aliyuncs.com @@ -504,7 +499,6 @@ Jaqjaq.aliyuncs.com Pushcloudpush.aliyuncs.com Alidnsalidns.aliyuncs.com - Kmskms.cn-hangzhou.aliyuncs.com Locationlocation.aliyuncs.com Msgmsg-inner.aliyuncs.com ChargingServicechargingservice.aliyuncs.com @@ -534,11 +528,10 @@ PTSpts.aliyuncs.com Qualitycheckqualitycheck.aliyuncs.com Ubsmsubsms.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com HighDDosyd-highddos-cn-hangzhou.aliyuncs.com CmsSiteMonitorsitemonitor.aliyuncs.com Rdsrds.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com + Mtsmts.us-west-1.aliyuncs.com CFcf.aliyuncs.com Acsacs.aliyun-inc.com Httpdnshttpdns-api.aliyuncs.com @@ -579,7 +572,6 @@ Smssms.aliyuncs.com Drdsdrds.aliyuncs.com HPChpc.aliyuncs.com - Kmskms.cn-hangzhou.aliyuncs.com Locationlocation.aliyuncs.com ChargingServicechargingservice.aliyuncs.com Msgmsg-inner.aliyuncs.com @@ -610,10 +602,9 @@ PTSpts.aliyuncs.com Qualitycheckqualitycheck.aliyuncs.com Ubsmsubsms.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com HighDDosyd-highddos-cn-hangzhou.aliyuncs.com Rdsrds.aliyuncs.com - 
Mtsmts.cn-hangzhou.aliyuncs.com + Mtsmts.cn-shanghai.aliyuncs.com CFcf.aliyuncs.com Httpdnshttpdns-api.aliyuncs.com Location-innerlocation-inner.aliyuncs.com @@ -652,6 +643,7 @@ cn-hangzhou + ARMSarms.cn-hangzhou.aliyuncs.com CScs.aliyuncs.com COScos.aliyuncs.com Essess.aliyuncs.com @@ -743,7 +735,6 @@ Smssms.aliyuncs.com Drdsdrds.aliyuncs.com CScs.aliyuncs.com - Kmskms.cn-hangzhou.aliyuncs.com Locationlocation.aliyuncs.com ChargingServicechargingservice.aliyuncs.com Msgmsg-inner.aliyuncs.com @@ -774,10 +765,9 @@ PTSpts.aliyuncs.com Qualitycheckqualitycheck.aliyuncs.com Ubsmsubsms.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com Stssts.aliyuncs.com Rdsrds.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com + Mtsmts.cn-beijing.aliyuncs.com Location-innerlocation-inner.aliyuncs.com CFcf.aliyuncs.com Httpdnshttpdns-api.aliyuncs.com @@ -819,6 +809,7 @@ cn-shenzhen + ARMSarms.cn-shenzhen.aliyuncs.com CScs.aliyuncs.com COScos.aliyuncs.com Onsons.aliyuncs.com @@ -859,7 +850,7 @@ Stssts.aliyuncs.com Vpcvpc.aliyuncs.com Rdsrds.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com + Mtsmts.cn-shenzhen.aliyuncs.com Oascn-shenzhen.oas.aliyuncs.com CFcf.aliyuncs.com Acsacs.aliyun-inc.com @@ -908,7 +899,6 @@ Smssms.aliyuncs.com Jaqjaq.aliyuncs.com Dtsdts.aliyuncs.com - Kmskms.cn-hangzhou.aliyuncs.com Locationlocation.aliyuncs.com Essess.aliyuncs.com R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com @@ -937,7 +927,7 @@ CloudAPIapigateway.cn-qingdao.aliyuncs.com Stssts.aliyuncs.com Rdsrds.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com + Mtsmts.cn-qingdao.aliyuncs.com Location-innerlocation-inner.aliyuncs.com CFcf.aliyuncs.com Acsacs.aliyun-inc.com @@ -1003,6 +993,7 @@ cn-beijing + ARMSarms.cn-beijing.aliyuncs.com CScs.aliyuncs.com COScos.aliyuncs.com Jaqjaq.aliyuncs.com @@ -1046,12 +1037,12 @@ PTSpts.aliyuncs.com M-kvstorem-kvstore.aliyuncs.com Apigatewayapigateway.cn-beijing.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com + CloudAPIapigateway.cn-beijing.aliyuncs.com Kmskms.cn-beijing.aliyuncs.com HighDDosyd-highddos-cn-hangzhou.aliyuncs.com CmsSiteMonitorsitemonitor.aliyuncs.com Aceace.cn-hangzhou.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com + Mtsmts.cn-beijing.aliyuncs.com CFcf.aliyuncs.com Acsacs.aliyun-inc.com Httpdnshttpdns-api.aliyuncs.com @@ -1077,6 +1068,7 @@ Yundunyundun-cn-hangzhou.aliyuncs.com Cdncdn.aliyuncs.com YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + vodvod.cn-beijing.aliyuncs.com @@ -1092,7 +1084,6 @@ Smssms.aliyuncs.com Salessales.cn-hangzhou.aliyuncs.com Dtsdts.aliyuncs.com - Kmskms.cn-hangzhou.aliyuncs.com Locationlocation.aliyuncs.com Msgmsg-inner.aliyuncs.com ChargingServicechargingservice.aliyuncs.com @@ -1122,7 +1113,6 @@ PTSpts.aliyuncs.com Qualitycheckqualitycheck.aliyuncs.com Ubsmsubsms.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com Rdsrds.aliyuncs.com Mtsmts.cn-hangzhou.aliyuncs.com Location-innerlocation-inner.aliyuncs.com @@ -1180,7 +1170,6 @@ Smssms.aliyuncs.com Jaqjaq.aliyuncs.com Dtsdts.aliyuncs.com - Kmskms.cn-hangzhou.aliyuncs.com Locationlocation.aliyuncs.com Msgmsg-inner.aliyuncs.com ChargingServicechargingservice.aliyuncs.com @@ -1210,7 +1199,6 @@ PTSpts.aliyuncs.com Qualitycheckqualitycheck.aliyuncs.com M-kvstorem-kvstore.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com Rdsrds.aliyuncs.com Mtsmts.cn-hangzhou.aliyuncs.com CFcf.aliyuncs.com @@ -1304,11 +1292,11 @@ Bssbss.aliyuncs.com Ubsmsubsms.aliyuncs.com Apigatewayapigateway.ap-southeast-1.aliyuncs.com - CloudAPIapigateway.cn-hangzhou.aliyuncs.com + 
CloudAPIapigateway.ap-southeast-1.aliyuncs.com Stssts.aliyuncs.com CmsSiteMonitorsitemonitor.aliyuncs.com Aceace.cn-hangzhou.aliyuncs.com - Mtsmts.cn-hangzhou.aliyuncs.com + Mtsmts.ap-southeast-1.aliyuncs.com CFcf.aliyuncs.com Crmcrm-cn-hangzhou.aliyuncs.com Location-innerlocation-inner.aliyuncs.com @@ -1348,4 +1336,14 @@ Slbslb.eu-central-1.aliyuncs.com - \ No newline at end of file + + cn-zhangjiakou + + Rdsrds.cn-zhangjiakou.aliyuncs.com + Ecsecs.cn-zhangjiakou.aliyuncs.com + Vpcvpc.cn-zhangjiakou.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.cn-zhangjiakou.aliyuncs.com + + + diff --git a/vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go b/vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go new file mode 100644 index 000000000..2a316e18e --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go @@ -0,0 +1,104 @@ +package ecs + +import "github.com/denverdino/aliyungo/common" + +type CreateForwardEntryArgs struct { + RegionId common.Region + ForwardTableId string + ExternalIp string + ExternalPort string + IpProtocol string + InternalIp string + InternalPort string +} + +type CreateForwardEntryResponse struct { + common.Response + ForwardEntryId string +} + +type DescribeForwardTableEntriesArgs struct { + RegionId common.Region + ForwardTableId string + common.Pagination +} + +type ForwardTableEntrySetType struct { + RegionId common.Region + ExternalIp string + ExternalPort string + ForwardEntryId string + ForwardTableId string + InternalIp string + InternalPort string + IpProtocol string + Status string +} + +type DescribeForwardTableEntriesResponse struct { + common.Response + common.PaginationResult + ForwardTableEntries struct { + ForwardTableEntry []ForwardTableEntrySetType + } +} + +type ModifyForwardEntryArgs struct { + RegionId common.Region + ForwardTableId string + ForwardEntryId string + ExternalIp string + IpProtocol string + ExternalPort string + InternalIp string + InternalPort string +} + +type ModifyForwardEntryResponse struct { + common.Response +} + +type DeleteForwardEntryArgs struct { + RegionId common.Region + ForwardTableId string + ForwardEntryId string +} + +type DeleteForwardEntryResponse struct { + common.Response +} + +func (client *Client) CreateForwardEntry(args *CreateForwardEntryArgs) (resp *CreateForwardEntryResponse, err error) { + response := CreateForwardEntryResponse{} + err = client.Invoke("CreateForwardEntry", args, &response) + if err != nil { + return nil, err + } + return &response, err +} + +func (client *Client) DescribeForwardTableEntries(args *DescribeForwardTableEntriesArgs) (forwardTableEntries []ForwardTableEntrySetType, + pagination *common.PaginationResult, err error) { + + args.Validate() + response := DescribeForwardTableEntriesResponse{} + + err = client.Invoke("DescribeForwardTableEntries", args, &response) + + if err != nil { + return nil, nil, err + } + + return response.ForwardTableEntries.ForwardTableEntry, &response.PaginationResult, nil +} + +func (client *Client) ModifyForwardEntry(args *ModifyForwardEntryArgs) error { + response := ModifyForwardEntryResponse{} + return client.Invoke("ModifyForwardEntry", args, &response) +} + +func (client *Client) DeleteForwardEntry(args *DeleteForwardEntryArgs) error { + response := DeleteForwardEntryResponse{} + err := client.Invoke("DeleteForwardEntry", args, &response) + return err +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/vpcs.go b/vendor/github.com/denverdino/aliyungo/ecs/vpcs.go index 7a62857cd..80faf21ca 100644 --- 
a/vendor/github.com/denverdino/aliyungo/ecs/vpcs.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/vpcs.go @@ -79,6 +79,7 @@ type VpcSetType struct { CidrBlock string VRouterId string Description string + IsDefault bool CreationTime util.ISO6801Time } diff --git a/vendor/github.com/denverdino/aliyungo/ecs/vswitches.go b/vendor/github.com/denverdino/aliyungo/ecs/vswitches.go index 8bf10394a..8a879ec80 100644 --- a/vendor/github.com/denverdino/aliyungo/ecs/vswitches.go +++ b/vendor/github.com/denverdino/aliyungo/ecs/vswitches.go @@ -77,6 +77,7 @@ type VSwitchSetType struct { AvailableIpAddressCount int Description string VSwitchName string + IsDefault bool CreationTime util.ISO6801Time } diff --git a/vendor/github.com/denverdino/aliyungo/ess/client.go b/vendor/github.com/denverdino/aliyungo/ess/client.go new file mode 100644 index 000000000..a2d486546 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ess/client.go @@ -0,0 +1,48 @@ +package ess + +import ( + "github.com/denverdino/aliyungo/common" + + "os" +) + +type Client struct { + common.Client +} + +const ( + // ESSDefaultEndpoint is the default API endpoint of ESS services + ESSDefaultEndpoint = "https://ess.aliyuncs.com" + ESSAPIVersion = "2014-08-28" + ESSServiceCode = "ess" +) + +// NewClient creates a new instance of RDS client +func NewClient(accessKeyId, accessKeySecret string) *Client { + endpoint := os.Getenv("ESS_ENDPOINT") + if endpoint == "" { + endpoint = ESSDefaultEndpoint + } + return NewClientWithEndpoint(endpoint, accessKeyId, accessKeySecret) +} + +func NewClientWithEndpoint(endpoint string, accessKeyId, accessKeySecret string) *Client { + client := &Client{} + client.Init(endpoint, ESSAPIVersion, accessKeyId, accessKeySecret) + return client +} + +func NewESSClient(accessKeyId, accessKeySecret string, regionID common.Region) *Client { + endpoint := os.Getenv("ESS_ENDPOINT") + if endpoint == "" { + endpoint = ESSDefaultEndpoint + } + + return NewClientWithRegion(endpoint, accessKeyId, accessKeySecret, regionID) +} + +func NewClientWithRegion(endpoint string, accessKeyId, accessKeySecret string, regionID common.Region) *Client { + client := &Client{} + client.NewInit(endpoint, ESSAPIVersion, accessKeyId, accessKeySecret, ESSServiceCode, regionID) + return client +} diff --git a/vendor/github.com/denverdino/aliyungo/ess/configuration.go b/vendor/github.com/denverdino/aliyungo/ess/configuration.go new file mode 100644 index 000000000..7fdcd187c --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ess/configuration.go @@ -0,0 +1,127 @@ +package ess + +import ( + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/ecs" +) + +type CreateScalingConfigurationArgs struct { + ScalingGroupId string + ImageId string + InstanceType string + IoOptimized ecs.IoOptimized + SecurityGroupId string + ScalingConfigurationName string + InternetChargeType common.InternetChargeType + InternetMaxBandwidthIn int + InternetMaxBandwidthOut int + SystemDisk_Category common.UnderlineString + SystemDisk_Size common.UnderlineString + DataDisk []DataDiskType +} + +type DataDiskType struct { + Category string + SnapshotId string + Device string + Size int +} + +type CreateScalingConfigurationResponse struct { + ScalingConfigurationId string + common.Response +} + +// CreateScalingConfiguration create scaling configuration +// +// You can read doc at https://help.aliyun.com/document_detail/25944.html?spm=5176.doc25942.6.625.KcE5ir +func (client *Client) CreateScalingConfiguration(args *CreateScalingConfigurationArgs) 
(resp *CreateScalingConfigurationResponse, err error) { + response := CreateScalingConfigurationResponse{} + err = client.InvokeByFlattenMethod("CreateScalingConfiguration", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} + +type DescribeScalingConfigurationsArgs struct { + RegionId common.Region + ScalingGroupId string + ScalingConfigurationId common.FlattenArray + ScalingConfigurationName common.FlattenArray + common.Pagination +} + +type DescribeScalingConfigurationsResponse struct { + common.Response + common.PaginationResult + ScalingConfigurations struct { + ScalingConfiguration []ScalingConfigurationItemType + } +} + +type ScalingConfigurationItemType struct { + ScalingConfigurationId string + ScalingConfigurationName string + ScalingGroupId string + ImageId string + InstanceType string + IoOptimized string + SecurityGroupId string + InternetChargeType string + LifecycleState LifecycleState + CreationTime string + InternetMaxBandwidthIn int + InternetMaxBandwidthOut int + SystemDiskCategory string + DataDisks struct { + DataDisk []DataDiskItemType + } +} + +type DataDiskItemType struct { + Size int + Category string + SnapshotId string + Device string +} + +// DescribeScalingConfigurations describes scaling configuration +// +// You can read doc at https://help.aliyun.com/document_detail/25945.html?spm=5176.doc25944.6.626.knG0zz +func (client *Client) DescribeScalingConfigurations(args *DescribeScalingConfigurationsArgs) (configs []ScalingConfigurationItemType, pagination *common.PaginationResult, err error) { + args.Validate() + response := DescribeScalingConfigurationsResponse{} + + err = client.InvokeByFlattenMethod("DescribeScalingConfigurations", args, &response) + + if err == nil { + return response.ScalingConfigurations.ScalingConfiguration, &response.PaginationResult, nil + } + + return nil, nil, err +} + +type DeleteScalingConfigurationArgs struct { + ScalingConfigurationId string + ScalingGroupId string + ImageId string +} + +type DeleteScalingConfigurationResponse struct { + common.Response +} + +// DeleteScalingConfiguration delete scaling configuration +// +// You can read doc at https://help.aliyun.com/document_detail/25946.html?spm=5176.doc25944.6.627.MjkuuL +func (client *Client) DeleteScalingConfiguration(args *DeleteScalingConfigurationArgs) (resp *DeleteScalingConfigurationResponse, err error) { + response := DeleteScalingConfigurationResponse{} + err = client.InvokeByFlattenMethod("DeleteScalingConfiguration", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ess/group.go b/vendor/github.com/denverdino/aliyungo/ess/group.go new file mode 100644 index 000000000..c24e074fd --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ess/group.go @@ -0,0 +1,242 @@ +package ess + +import "github.com/denverdino/aliyungo/common" + +type LifecycleState string + +const ( + Active = LifecycleState("Active") + Inacitve = LifecycleState("Inacitve") + Deleting = LifecycleState("Deleting") + InService = LifecycleState("InService") + Pending = LifecycleState("Pending") + Removing = LifecycleState("Removing") +) + +type CreateScalingGroupArgs struct { + RegionId common.Region + ScalingGroupName string + LoadBalancerId string + VpcId string + VSwitchId string + MaxSize int + MinSize int + DefaultCooldown int + RemovalPolicy common.FlattenArray + DBInstanceId common.FlattenArray +} + +type CreateScalingGroupResponse struct { + common.Response + ScalingGroupId 
string +} + +// CreateScalingGroup create scaling group +// +// You can read doc at https://help.aliyun.com/document_detail/25936.html?spm=5176.doc25940.6.617.vm6LXF +func (client *Client) CreateScalingGroup(args *CreateScalingGroupArgs) (resp *CreateScalingGroupResponse, err error) { + response := CreateScalingGroupResponse{} + err = client.InvokeByFlattenMethod("CreateScalingGroup", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} + +type ModifyScalingGroupArgs struct { + ScalingGroupId string + ScalingGroupName string + ActiveScalingConfigurationId string + MinSize int + MaxSize int + DefaultCooldown int + RemovalPolicy common.FlattenArray +} + +type ModifyScalingGroupResponse struct { + common.Response +} + +// ModifyScalingGroup modify scaling group +// +// You can read doc at https://help.aliyun.com/document_detail/25937.html?spm=5176.doc25936.6.618.iwDcXT +func (client *Client) ModifyScalingGroup(args *ModifyScalingGroupArgs) (resp *ModifyScalingGroupResponse, err error) { + response := ModifyScalingGroupResponse{} + err = client.InvokeByFlattenMethod("ModifyScalingGroup", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} + +type DescribeScalingGroupsArgs struct { + RegionId common.Region + ScalingGroupId common.FlattenArray + ScalingGroupName common.FlattenArray + common.Pagination +} + +type DescribeInstancesResponse struct { + common.Response + common.PaginationResult + ScalingGroups struct { + ScalingGroup []ScalingGroupItemType + } +} + +type ScalingGroupItemType struct { + ScalingGroupId string + ScalingGroupName string + ActiveScalingConfigurationId string + RegionId string + LoadBalancerId string + VSwitchId string + CreationTime string + LifecycleState LifecycleState + MinSize int + MaxSize int + DefaultCooldown int + TotalCapacity int + ActiveCapacity int + PendingCapacity int + RemovingCapacity int + RemovalPolicies RemovalPolicySetType + DBInstanceIds DBInstanceIdSetType +} + +type RemovalPolicySetType struct { + RemovalPolicy []string +} + +type DBInstanceIdSetType struct { + DBInstanceId []string +} + +// DescribeScalingGroups describes scaling groups +// +// You can read doc at https://help.aliyun.com/document_detail/25938.html?spm=5176.doc25937.6.619.sUUOT7 +func (client *Client) DescribeScalingGroups(args *DescribeScalingGroupsArgs) (groups []ScalingGroupItemType, pagination *common.PaginationResult, err error) { + args.Validate() + response := DescribeInstancesResponse{} + + err = client.InvokeByFlattenMethod("DescribeScalingGroups", args, &response) + + if err == nil { + return response.ScalingGroups.ScalingGroup, &response.PaginationResult, nil + } + + return nil, nil, err +} + +type DescribeScalingInstancesArgs struct { + RegionId common.Region + ScalingGroupId string + ScalingConfigurationId string + HealthStatus string + CreationType string + LifecycleState LifecycleState + InstanceId common.FlattenArray + common.Pagination +} + +type DescribeScalingInstancesResponse struct { + common.Response + common.PaginationResult + ScalingInstances struct { + ScalingInstance []ScalingInstanceItemType + } +} + +type ScalingInstanceItemType struct { + InstanceId string + ScalingGroupId string + ScalingConfigurationId string + HealthStatus string + CreationTime string + CreationType string + LifecycleState LifecycleState +} + +// DescribeScalingInstances describes scaling instances +// +// You can read doc at https://help.aliyun.com/document_detail/25942.html?spm=5176.doc25941.6.623.2xA0Uj +func 
(client *Client) DescribeScalingInstances(args *DescribeScalingInstancesArgs) (instances []ScalingInstanceItemType, pagination *common.PaginationResult, err error) { + args.Validate() + response := DescribeScalingInstancesResponse{} + + err = client.InvokeByFlattenMethod("DescribeScalingInstances", args, &response) + + if err == nil { + return response.ScalingInstances.ScalingInstance, &response.PaginationResult, nil + } + + return nil, nil, err +} + +type EnableScalingGroupArgs struct { + ScalingGroupId string + ActiveScalingConfigurationId string + InstanceId common.FlattenArray +} + +type EnableScalingGroupResponse struct { + common.Response +} + +// EnableScalingGroup enable scaling group +// +// You can read doc at https://help.aliyun.com/document_detail/25939.html?spm=5176.doc25938.6.620.JiJhkx +func (client *Client) EnableScalingGroup(args *EnableScalingGroupArgs) (resp *EnableScalingGroupResponse, err error) { + response := EnableScalingGroupResponse{} + err = client.InvokeByFlattenMethod("EnableScalingGroup", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} + +type DisableScalingGroupArgs struct { + ScalingGroupId string +} + +type DisableScalingGroupResponse struct { + common.Response +} + +// DisableScalingGroup disable scaling group +// +// You can read doc at https://help.aliyun.com/document_detail/25940.html?spm=5176.doc25939.6.621.M8GuuY +func (client *Client) DisableScalingGroup(args *DisableScalingGroupArgs) (resp *DisableScalingGroupResponse, err error) { + response := DisableScalingGroupResponse{} + err = client.InvokeByFlattenMethod("DisableScalingGroup", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} + +type DeleteScalingGroupArgs struct { + ScalingGroupId string + ForceDelete bool +} + +type DeleteScalingGroupResponse struct { + common.Response +} + +// DeleteScalingGroup delete scaling group +// +// You can read doc at https://help.aliyun.com/document_detail/25941.html?spm=5176.doc25940.6.622.mRBCuw +func (client *Client) DeleteScalingGroup(args *DeleteScalingGroupArgs) (resp *DeleteScalingGroupResponse, err error) { + response := DeleteScalingGroupResponse{} + err = client.InvokeByFlattenMethod("DeleteScalingGroup", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ess/rule.go b/vendor/github.com/denverdino/aliyungo/ess/rule.go new file mode 100644 index 000000000..b6ce29002 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ess/rule.go @@ -0,0 +1,130 @@ +package ess + +import "github.com/denverdino/aliyungo/common" + +type AdjustmentType string + +const ( + QuantityChangeInCapacity = AdjustmentType("QuantityChangeInCapacity") + PercentChangeInCapacity = AdjustmentType("PercentChangeInCapacity") + TotalCapacity = AdjustmentType("TotalCapacity") +) + +type CreateScalingRuleArgs struct { + RegionId common.Region + ScalingGroupId string + AdjustmentType AdjustmentType + AdjustmentValue int + Cooldown int + ScalingRuleName string +} + +type CreateScalingRuleResponse struct { + common.Response + ScalingRuleId string + ScalingRuleAri string +} + +// CreateScalingRule create scaling rule +// +// You can read doc at https://help.aliyun.com/document_detail/25948.html?spm=5176.doc25944.6.629.FLkNnj +func (client *Client) CreateScalingRule(args *CreateScalingRuleArgs) (resp *CreateScalingRuleResponse, err error) { + response := CreateScalingRuleResponse{} + err = client.Invoke("CreateScalingRule", args, 
&response) + + if err != nil { + return nil, err + } + return &response, nil +} + +type ModifyScalingRuleArgs struct { + RegionId common.Region + ScalingRuleId string + AdjustmentType AdjustmentType + AdjustmentValue int + Cooldown int + ScalingRuleName string +} + +type ModifyScalingRuleResponse struct { + common.Response +} + +// ModifyScalingRule modify scaling rule +// +// You can read doc at https://help.aliyun.com/document_detail/25949.html?spm=5176.doc25948.6.630.HGN1va +func (client *Client) ModifyScalingRule(args *ModifyScalingRuleArgs) (resp *ModifyScalingRuleResponse, err error) { + response := ModifyScalingRuleResponse{} + err = client.Invoke("ModifyScalingRule", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} + +type DescribeScalingRulesArgs struct { + common.Pagination + RegionId common.Region + ScalingGroupId string + ScalingRuleId common.FlattenArray + ScalingRuleName common.FlattenArray + ScalingRuleAri common.FlattenArray +} + +type DescribeScalingRulesResponse struct { + common.Response + common.PaginationResult + ScalingRules struct { + ScalingRule []ScalingRuleItemType + } +} + +type ScalingRuleItemType struct { + ScalingRuleId string + ScalingGroupId string + ScalingRuleName string + AdjustmentType string + ScalingRuleAri string + Cooldown int + AdjustmentValue int +} + +// DescribeScalingRules describes scaling rules +// +// You can read doc at https://help.aliyun.com/document_detail/25950.html?spm=5176.doc25949.6.631.RwPguo +func (client *Client) DescribeScalingRules(args *DescribeScalingRulesArgs) (configs []ScalingRuleItemType, pagination *common.PaginationResult, err error) { + args.Validate() + response := DescribeScalingRulesResponse{} + + err = client.InvokeByFlattenMethod("DescribeScalingRules", args, &response) + + if err == nil { + return response.ScalingRules.ScalingRule, &response.PaginationResult, nil + } + + return nil, nil, err +} + +type DeleteScalingRuleArgs struct { + RegionId common.Region + ScalingRuleId string +} + +type DeleteScalingRuleResponse struct { + common.Response +} + +// DeleteScalingRule delete scaling rule +// +// You can read doc at https://help.aliyun.com/document_detail/25951.html?spm=5176.doc25950.6.632.HbPLMZ +func (client *Client) DeleteScalingRule(args *DeleteScalingRuleArgs) (resp *DeleteScalingRuleResponse, err error) { + response := DeleteScalingRuleResponse{} + err = client.InvokeByFlattenMethod("DeleteScalingRule", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ess/schedule.go b/vendor/github.com/denverdino/aliyungo/ess/schedule.go new file mode 100644 index 000000000..9da5a86e0 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ess/schedule.go @@ -0,0 +1,140 @@ +package ess + +import "github.com/denverdino/aliyungo/common" + +type RecurrenceType string + +const ( + Daily = RecurrenceType("Daily") + Weekly = RecurrenceType("Weekly") + Monthly = RecurrenceType("Monthly") +) + +type CreateScheduledTaskArgs struct { + RegionId common.Region + ScheduledAction string + LaunchTime string + ScheduledTaskName string + Description string + LaunchExpirationTime int + RecurrenceType RecurrenceType + RecurrenceValue string + RecurrenceEndTime string + TaskEnabled bool +} + +type CreateScheduledTaskResponse struct { + common.Response + ScheduledTaskId string +} + +// CreateScheduledTask create schedule task +// +// You can read doc at 
https://help.aliyun.com/document_detail/25957.html?spm=5176.doc25950.6.638.FfQ0BR +func (client *Client) CreateScheduledTask(args *CreateScheduledTaskArgs) (resp *CreateScheduledTaskResponse, err error) { + response := CreateScheduledTaskResponse{} + err = client.Invoke("CreateScheduledTask", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} + +type ModifyScheduledTaskArgs struct { + RegionId common.Region + ScheduledTaskId string + ScheduledAction string + LaunchTime string + ScheduledTaskName string + Description string + LaunchExpirationTime int + RecurrenceType RecurrenceType + RecurrenceValue string + RecurrenceEndTime string + TaskEnabled bool +} + +type ModifyScheduledTaskResponse struct { + common.Response +} + +// ModifyScheduledTask modify schedule task +// +// You can read doc at https://help.aliyun.com/document_detail/25958.html?spm=5176.doc25957.6.639.rgxQ1c +func (client *Client) ModifyScheduledTask(args *ModifyScheduledTaskArgs) (resp *ModifyScheduledTaskResponse, err error) { + response := ModifyScheduledTaskResponse{} + err = client.Invoke("ModifyScheduledTask", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} + +type DescribeScheduledTasksArgs struct { + RegionId common.Region + ScheduledTaskId common.FlattenArray + ScheduledTaskName common.FlattenArray + ScheduledAction common.FlattenArray + common.Pagination +} + +type DescribeScheduledTasksResponse struct { + common.Response + common.PaginationResult + ScheduledTasks struct { + ScheduledTask []ScheduledTaskItemType + } +} + +type ScheduledTaskItemType struct { + ScheduledTaskId string + ScheduledTaskName string + Description string + ScheduledAction string + LaunchTime string + RecurrenceType string + RecurrenceValue string + RecurrenceEndTime string + LaunchExpirationTime int + TaskEnabled bool +} + +// DescribeScheduledTasks describes scaling tasks +// +// You can read doc at https://help.aliyun.com/document_detail/25959.html?spm=5176.doc25958.6.640.cLccdR +func (client *Client) DescribeScheduledTasks(args *DescribeScheduledTasksArgs) (tasks []ScheduledTaskItemType, pagination *common.PaginationResult, err error) { + args.Validate() + response := DescribeScheduledTasksResponse{} + + err = client.InvokeByFlattenMethod("DescribeScheduledTasks", args, &response) + + if err == nil { + return response.ScheduledTasks.ScheduledTask, &response.PaginationResult, nil + } + + return nil, nil, err +} + +type DeleteScheduledTaskArgs struct { + RegionId common.Region + ScheduledTaskId string +} + +type DeleteScheduledTaskResponse struct { + common.Response +} + +// DeleteScheduledTask delete schedule task +// +// You can read doc at https://help.aliyun.com/document_detail/25960.html?spm=5176.doc25959.6.641.aGdNuW +func (client *Client) DeleteScheduledTask(args *DeleteScheduledTaskArgs) (resp *DeleteScheduledTaskResponse, err error) { + response := DeleteScheduledTaskResponse{} + err = client.Invoke("DeleteScheduledTask", args, &response) + + if err != nil { + return nil, err + } + return &response, nil +} diff --git a/vendor/github.com/denverdino/aliyungo/util/encoding.go b/vendor/github.com/denverdino/aliyungo/util/encoding.go index e545e069d..8cb588288 100644 --- a/vendor/github.com/denverdino/aliyungo/util/encoding.go +++ b/vendor/github.com/denverdino/aliyungo/util/encoding.go @@ -7,9 +7,26 @@ import ( "net/url" "reflect" "strconv" + "strings" "time" ) +// change instance=["a", "b"] +// to instance.1="a" instance.2="b" +func FlattenFn(fieldName string, 
field reflect.Value, values *url.Values) { + l := field.Len() + if l > 0 { + for i := 0; i < l; i++ { + str := field.Index(i).String() + values.Set(fieldName+"."+strconv.Itoa(i+1), str) + } + } +} + +func Underline2Dot(name string) string { + return strings.Replace(name, "_", ".", -1) +} + //ConvertToQueryValues converts the struct to url.Values func ConvertToQueryValues(ifc interface{}) url.Values { values := url.Values{} @@ -22,6 +39,10 @@ func SetQueryValues(ifc interface{}, values *url.Values) { setQueryValues(ifc, values, "") } +func SetQueryValueByFlattenMethod(ifc interface{}, values *url.Values) { + setQueryValuesByFlattenMethod(ifc, values, "") +} + func setQueryValues(i interface{}, values *url.Values, prefix string) { // add to support url.Values mapValues, ok := i.(url.Values) @@ -150,3 +171,144 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { } } } + +func setQueryValuesByFlattenMethod(i interface{}, values *url.Values, prefix string) { + // add to support url.Values + mapValues, ok := i.(url.Values) + if ok { + for k, _ := range mapValues { + values.Set(k, mapValues.Get(k)) + } + return + } + + elem := reflect.ValueOf(i) + if elem.Kind() == reflect.Ptr { + elem = elem.Elem() + } + elemType := elem.Type() + for i := 0; i < elem.NumField(); i++ { + + fieldName := elemType.Field(i).Name + anonymous := elemType.Field(i).Anonymous + field := elem.Field(i) + + // TODO Use Tag for validation + // tag := typ.Field(i).Tag.Get("tagname") + kind := field.Kind() + + if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { + continue + } + if kind == reflect.Ptr { + field = field.Elem() + kind = field.Kind() + } + + var value string + //switch field.Interface().(type) { + switch kind { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i := field.Int() + if i != 0 { + value = strconv.FormatInt(i, 10) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i := field.Uint() + if i != 0 { + value = strconv.FormatUint(i, 10) + } + case reflect.Float32: + value = strconv.FormatFloat(field.Float(), 'f', 4, 32) + case reflect.Float64: + value = strconv.FormatFloat(field.Float(), 'f', 4, 64) + case reflect.Bool: + value = strconv.FormatBool(field.Bool()) + case reflect.String: + value = field.String() + case reflect.Map: + ifc := field.Interface() + m := ifc.(map[string]string) + if m != nil { + j := 0 + for k, v := range m { + j++ + keyName := fmt.Sprintf("%s.%d.Key", fieldName, j) + values.Set(keyName, k) + valueName := fmt.Sprintf("%s.%d.Value", fieldName, j) + values.Set(valueName, v) + } + } + case reflect.Slice: + if field.Type().Name() == "FlattenArray" { + FlattenFn(fieldName, field, values) + } else { + switch field.Type().Elem().Kind() { + case reflect.Uint8: + value = string(field.Bytes()) + case reflect.String: + l := field.Len() + if l > 0 { + strArray := make([]string, l) + for i := 0; i < l; i++ { + strArray[i] = field.Index(i).String() + } + bytes, err := json.Marshal(strArray) + if err == nil { + value = string(bytes) + } else { + log.Printf("Failed to convert JSON: %v", err) + } + } + default: + l := field.Len() + for j := 0; j < l; j++ { + prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) + ifc := field.Index(j).Interface() + //log.Printf("%s : %v", prefixName, ifc) + if ifc != nil { + setQueryValuesByFlattenMethod(ifc, values, prefixName) + } + } + continue + } + } + + default: + switch 
field.Interface().(type) { + case ISO6801Time: + t := field.Interface().(ISO6801Time) + value = t.String() + case time.Time: + t := field.Interface().(time.Time) + value = GetISO8601TimeStamp(t) + default: + + ifc := field.Interface() + if ifc != nil { + if anonymous { + SetQueryValues(ifc, values) + } else { + prefixName := fieldName + "." + setQueryValuesByFlattenMethod(ifc, values, prefixName) + } + continue + } + } + } + if value != "" { + name := elemType.Field(i).Tag.Get("ArgName") + if name == "" { + name = fieldName + } + if prefix != "" { + name = prefix + name + } + // NOTE: here we will change name to underline style when the type is UnderlineString + if field.Type().Name() == "UnderlineString" { + name = Underline2Dot(name) + } + values.Set(name, value) + } + } +} diff --git a/vendor/vendor.json b/vendor/vendor.json index f15250981..205e2eb02 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1300,34 +1300,40 @@ "revisionTime": "2016-10-29T20:57:26Z" }, { - "checksumSHA1": "SdiAYZOqWQ60ifRUHLwLiDMKMYA=", + "checksumSHA1": "4YIveqfMA1MH8oX8YMG7rDSl+ms=", "path": "github.com/denverdino/aliyungo/common", - "revision": "c4c75afbf7ea86e66672c1b6ed981385b4ad5ec2", - "revisionTime": "2017-03-21T07:55:32Z" + "revision": "afcc6903e3f10217da17e315558b3f829718ee04", + "revisionTime": "2017-04-13T09:54:00Z" }, { - "checksumSHA1": "UVYu5rvfoXgJnIpUyGcaovMvpms=", + "checksumSHA1": "WkWWoA5aRYkE2apOEQdAOfn+9cc=", "path": "github.com/denverdino/aliyungo/ecs", - "revision": "c4c75afbf7ea86e66672c1b6ed981385b4ad5ec2", - "revisionTime": "2017-03-21T07:55:32Z" + "revision": "afcc6903e3f10217da17e315558b3f829718ee04", + "revisionTime": "2017-04-13T09:54:00Z" + }, + { + "checksumSHA1": "BgIs8qwCMRM8xL6oLeo2Ki1QwBc=", + "path": "github.com/denverdino/aliyungo/ess", + "revision": "afcc6903e3f10217da17e315558b3f829718ee04", + "revisionTime": "2017-04-13T09:54:00Z" }, { "checksumSHA1": "riQMe2AR7qkLRkQ/MSr8gQp3zL4=", "path": "github.com/denverdino/aliyungo/rds", - "revision": "c4c75afbf7ea86e66672c1b6ed981385b4ad5ec2", - "revisionTime": "2017-03-21T07:55:32Z" + "revision": "afcc6903e3f10217da17e315558b3f829718ee04", + "revisionTime": "2017-04-13T09:54:00Z" }, { "checksumSHA1": "2g6VZONB51rul5YuSBvngH6u4A0=", "path": "github.com/denverdino/aliyungo/slb", - "revision": "c4c75afbf7ea86e66672c1b6ed981385b4ad5ec2", - "revisionTime": "2017-03-21T07:55:32Z" + "revision": "afcc6903e3f10217da17e315558b3f829718ee04", + "revisionTime": "2017-04-13T09:54:00Z" }, { - "checksumSHA1": "Lp0KtT7ycgq31ox3Uzhpxyw0U+Y=", + "checksumSHA1": "piZlmhWPLGxYkXLysTrjcXllO4c=", "path": "github.com/denverdino/aliyungo/util", - "revision": "c4c75afbf7ea86e66672c1b6ed981385b4ad5ec2", - "revisionTime": "2017-03-21T07:55:32Z" + "revision": "afcc6903e3f10217da17e315558b3f829718ee04", + "revisionTime": "2017-04-13T09:54:00Z" }, { "checksumSHA1": "yDQQpeUxwqB3C+4opweg6znWJQk=", diff --git a/website/source/docs/providers/alicloud/r/db_instance.html.markdown b/website/source/docs/providers/alicloud/r/db_instance.html.markdown index ee3a78071..7580f61e6 100644 --- a/website/source/docs/providers/alicloud/r/db_instance.html.markdown +++ b/website/source/docs/providers/alicloud/r/db_instance.html.markdown @@ -6,7 +6,7 @@ description: |- Provides an RDS instance resource. --- -# alicloud_db_instance +# alicloud\_db\_instance Provides an RDS instance resource. A DB instance is an isolated database environment in the cloud. A DB instance can contain multiple user-created @@ -14,16 +14,14 @@ databases. 
## Example Usage -```hcl +``` resource "alicloud_db_instance" "default" { - commodity_code = "rds" - - engine = "MySQL" - engine_version = "5.6" - - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = "10" - db_instance_net_type = "Intranet" + commodity_code = "rds" + engine = "MySQL" + engine_version = "5.6" + db_instance_class = "rds.mysql.t1.small" + db_instance_storage = "10" + db_instance_net_type = "Intranet" } ``` @@ -32,13 +30,13 @@ resource "alicloud_db_instance" "default" { The following arguments are supported: * `engine` - (Required) Database type. Value options: MySQL, SQLServer, PostgreSQL, and PPAS. -* `engine_version` - (Required) Database version. Value options: +* `engine_version` - (Required) Database version. Value options: - 5.5/5.6/5.7 for MySQL - 2008r2/2012 for SQLServer - 9.4 for PostgreSQL - 9.3 for PPAS * `db_instance_class` - (Required) Instance type. For details, see [Instance type table](https://intl.aliyun.com/help/doc-detail/26312.htm?spm=a3c0i.o26228en.a3.2.bRUHF3). -* `db_instance_storage` - (Required) User-defined storage space. Value range: +* `db_instance_storage` - (Required) User-defined storage space. Value range: - [5, 2000] for MySQL/PostgreSQL/PPAS HA dual node edition; - [20,1000] for MySQL 5.7 basic single node edition; - [10, 2000] for SQL Server 2008R2; @@ -65,7 +63,7 @@ The following arguments are supported: The database mapping supports the following: -* `db_name` - (Required) Name of the database requiring a uniqueness check. It may consist of lower case letters, numbers and underlines, and must start with a letter and have no more than 64 characters. +* `db_name` - (Required) Name of the database requiring a uniqueness check. It may consist of lower case letters, numbers and underlines, and must start with a letter and have no more than 64 characters. * `character_set_name` - (Required) Character set. The value range is limited to the following: - MySQL type: + utf8 @@ -78,7 +76,7 @@ The database mapping supports the following: + SQL_Latin1_General_CP1_CI_AS + SQL_Latin1_General_CP1_CS_AS + Chinese_PRC_BIN -* `db_description` - (Optional) Database description, which cannot exceed 256 characters. NOTE: It cannot begin with https://. +* `db_description` - (Optional) Database description, which cannot exceed 256 characters. NOTE: It cannot begin with https://. ~> **NOTE:** We neither support modify any of database attribute, nor insert/remove item at the same time. @@ -105,3 +103,4 @@ The following attributes are exported: * `backup_retention_period` - Retention days of the backup. * `security_ips` - Security ips of instance whitelist. * `connections` - Views all the connection information of a specified instance. + diff --git a/website/source/docs/providers/alicloud/r/disk.html.markdown b/website/source/docs/providers/alicloud/r/disk.html.markdown index 1b4098a31..82814b732 100644 --- a/website/source/docs/providers/alicloud/r/disk.html.markdown +++ b/website/source/docs/providers/alicloud/r/disk.html.markdown @@ -1,12 +1,12 @@ --- layout: "alicloud" page_title: "Alicloud: alicloud_disk" -sidebar_current: "docs-alicloud-resource-disk." +sidebar_current: "docs-alicloud-resource-disk" description: |- Provides a ECS Disk resource. --- -# alicloud_disk +# alicloud\_disk Provides a ECS disk resource. @@ -14,8 +14,10 @@ Provides a ECS disk resource. ## Example Usage -```hcl +``` +# Create a new ECS disk. 
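+# The category and size values must fall within the ranges given in the
+# argument reference below.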
resource "alicloud_disk" "ecs_disk" { + # cn-beijing availability_zone = "cn-beijing-b" name = "New-disk" description = "Hello ecs disk." @@ -32,7 +34,7 @@ resource "alicloud_disk" "ecs_disk" { The following arguments are supported: * `availability_zone` - (Required, Forces new resource) The Zone to create the disk in. -* `name` - (Optional) Name of the ECS disk. This name can have a string of 2 to 128 characters, must contain only alphanumeric characters or hyphens, such as "-",".","\_", and must not begin or end with a hyphen, and must not begin with http:// or https://. Default value is null. +* `name` - (Optional) Name of the ECS disk. This name can have a string of 2 to 128 characters, must contain only alphanumeric characters or hyphens, such as "-",".","_", and must not begin or end with a hyphen, and must not begin with http:// or https://. Default value is null. * `description` - (Optional) Description of the disk. This description can have a string of 2 to 256 characters, It cannot begin with http:// or https://. Default value is null. * `category` - (Optional, Forces new resource) Category of the disk. Valid values are `cloud`, `cloud_efficiency` and `cloud_ssd`. Default is `cloud`. * `size` - (Required) The size of the disk in GiBs, and its value depends on `Category`. `cloud` disk value range: 5GB ~ 2000GB and other category disk value range: 20 ~ 32768. @@ -51,4 +53,4 @@ The following attributes are exported: * `category` - The disk category. * `size` - The disk size. * `snapshot_id` - The disk snapshot ID. -* `tags` - The disk tags. +* `tags` - The disk tags. \ No newline at end of file diff --git a/website/source/docs/providers/alicloud/r/disk_attachment.html.markdown b/website/source/docs/providers/alicloud/r/disk_attachment.html.markdown index 4e500650f..d39e85a4c 100644 --- a/website/source/docs/providers/alicloud/r/disk_attachment.html.markdown +++ b/website/source/docs/providers/alicloud/r/disk_attachment.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a ECS Disk Attachment resource. --- -# alicloud_disk_attachment +# alicloud\_disk\_attachment Provides an Alicloud ECS Disk Attachment as a resource, to attach and detach disks from ECS Instances. @@ -14,7 +14,9 @@ Provides an Alicloud ECS Disk Attachment as a resource, to attach and detach dis Basic usage -```hcl +``` +# Create a new ECS disk-attachment and use it attach one disk to a new instance. + resource "alicloud_security_group" "ecs_sg" { name = "terraform-test-group" description = "New security group" @@ -63,4 +65,4 @@ The following attributes are exported: * `instance_id` - ID of the Instance. * `disk_id` - ID of the Disk. -* `device_name` - The device name exposed to the instance. +* `device_name` - The device name exposed to the instance. \ No newline at end of file diff --git a/website/source/docs/providers/alicloud/r/eip.html.markdown b/website/source/docs/providers/alicloud/r/eip.html.markdown index e602d50c1..ddb1b7689 100644 --- a/website/source/docs/providers/alicloud/r/eip.html.markdown +++ b/website/source/docs/providers/alicloud/r/eip.html.markdown @@ -1,18 +1,18 @@ --- layout: "alicloud" page_title: "Alicloud: alicloud_eip" -sidebar_current: "docs-alicloud-resource-eip." +sidebar_current: "docs-alicloud-resource-eip" description: |- Provides a ECS EIP resource. --- -# alicloud_eip +# alicloud\_eip Provides a ECS EIP resource. ## Example Usage -```hcl +``` # Create a new EIP. 
resource "alicloud_eip" "example" { bandwidth = "10" diff --git a/website/source/docs/providers/alicloud/r/eip_association.html.markdown b/website/source/docs/providers/alicloud/r/eip_association.html.markdown index 2e3f996be..6c0d7059a 100644 --- a/website/source/docs/providers/alicloud/r/eip_association.html.markdown +++ b/website/source/docs/providers/alicloud/r/eip_association.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a ECS EIP Association resource. --- -# alicloud_eip_association +# alicloud\_eip\_association Provides an Alicloud EIP Association resource, to associate and disassociate Elastic IPs from ECS Instances. @@ -16,7 +16,9 @@ Provides an Alicloud EIP Association resource, to associate and disassociate Ela ## Example Usage -```hcl +``` +# Create a new EIP association and use it to associate a EIP form a instance. + resource "alicloud_vpc" "vpc" { cidr_block = "10.1.0.0/21" } @@ -71,4 +73,4 @@ The following arguments are supported: The following attributes are exported: * `allocation_id` - As above. -* `instance_id` - As above. +* `instance_id` - As above. \ No newline at end of file diff --git a/website/source/docs/providers/alicloud/r/ess_scaling_configuration.html.markdown b/website/source/docs/providers/alicloud/r/ess_scaling_configuration.html.markdown new file mode 100644 index 000000000..003b7c988 --- /dev/null +++ b/website/source/docs/providers/alicloud/r/ess_scaling_configuration.html.markdown @@ -0,0 +1,84 @@ +--- +layout: "alicloud" +page_title: "Alicloud: alicloud_ess_scaling_configuration" +sidebar_current: "docs-alicloud-resource-ess-scaling-configuration" +description: |- + Provides a ESS scaling configuration resource. +--- + +# alicloud\_ess\_scaling\_configuration + +Provides a ESS scaling configuration resource. + +## Example Usage + +``` +resource "alicloud_security_group" "classic" { + # Other parameters... +} +resource "alicloud_ess_scaling_group" "scaling" { + min_size = 1 + max_size = 2 + removal_policies = ["OldestInstance", "NewestInstance"] +} + +resource "alicloud_ess_scaling_configuration" "config" { + scaling_group_id = "${alicloud_ess_scaling_group.scaling.id}" + + image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd" + instance_type = "ecs.s2.large" + security_group_id = "${alicloud_security_group.classic.id}" +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `scaling_group_id` - (Required) ID of the scaling group of a scaling configuration. +* `image_id` - (Required) ID of an image file, indicating the image resource selected when an instance is enabled. +* `instance_type` - (Required) Resource type of an ECS instance. +* `io_optimized` - (Required) Valid values are `none`, `optimized`, If `optimized`, the launched ECS instance will be I/O optimized. +* `security_group_id` - (Required) ID of the security group to which a newly created instance belongs. +* `scaling_configuration_name` - (Optional) Name shown for the scheduled task. If this parameter value is not specified, the default value is ScalingConfigurationId. +* `internet_charge_type` - (Optional) Network billing type, Values: PayByBandwidth or PayByTraffic. If this parameter value is not specified, the default value is PayByBandwidth. +* `internet_max_bandwidth_in` - (Optional) Maximum incoming bandwidth from the public network, measured in Mbps (Mega bit per second). The value range is [1,200]. +* `internet_max_bandwidth_out` - (Optional) Maximum outgoing bandwidth from the public network, measured in Mbps (Mega bit per second). 
The value range for PayByBandwidth is [1,100].
+* `system_disk_category` - (Optional) Category of the system disk. The parameter value options are cloud and ephemeral.
+* `data_disk` - (Optional) DataDisk mappings to attach to the ECS instance. See [Block datadisk](#block-datadisk) below for details.
+* `instance_ids` - (Optional) ID of the ECS instance to be attached to the scaling group after it is enabled. You can input up to 20 IDs.
+
+
+## Block datadisk
+
+The datadisk mapping supports the following:
+
+* `size` - (Optional) Size of data disk, in GB. The value ranges from 5 to 2,000 for a cloud disk and from 5 to 1,024 for an ephemeral disk. A maximum of four values can be entered.
+* `category` - (Optional) Category of data disk. The parameter value options are cloud and ephemeral.
+* `snapshot_id` - (Optional) Snapshot used for creating the data disk. If this parameter is specified, the size parameter is neglected, and the size of the created disk is the size of the snapshot.
+* `device` - (Optional) Attaching point of the data disk. If this parameter is empty, the ECS automatically assigns the attaching point when an ECS is created. The parameter value ranges from /dev/xvdb to /dev/xvdz. Restrictions on attaching ECS instances (these apply to the `instance_ids` argument above):
+  - The attached ECS instance and the scaling group must be in the same region.
+  - The attached ECS instance and the instance with active scaling configurations must be of the same type.
+  - The attached ECS instance must be in the running state.
+  - The attached ECS instance must not be attached to any other scaling group.
+  - The attached ECS instance supports Subscription and Pay-As-You-Go payment methods.
+  - If the VswitchID is specified for a scaling group, you cannot attach Classic ECS instances or ECS instances on other VPCs to the scaling group.
+  - If the VswitchID is not specified for the scaling group, ECS instances of the VPC type cannot be attached to the scaling group.
+* `active` - (Optional) Whether to make the current scaling configuration active in the scaling group.
+* `enable` - (Optional) Whether to enable the specified scaling group.
+  - After the scaling group is successfully enabled (the group is active), the ECS instances specified by the interface are attached to the group.
+  - If the current number of ECS instances in the scaling group is still smaller than MinSize after the ECS instances specified by the interface are attached, the Auto Scaling service automatically creates ECS instances in Pay-As-You-Go mode to make up the difference. For example, a scaling group is created with MinSize = 5. Two existing ECS instances are specified by the InstanceId.N parameter when the scaling group is enabled. Three additional ECS instances are automatically created after the two ECS instances are attached by the Auto Scaling service to the scaling group.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The scaling configuration ID.
+* `active` - Whether the current scaling configuration is active.
+* `image_id` - The image ID of the ECS instance.
+* `instance_type` - The type of the ECS instance.
+* `io_optimized` - Whether the ECS instance is I/O optimized.
+* `security_group_id` - ID of the security group to which a newly created instance belongs.
+* `scaling_configuration_name` - Name of the scaling configuration.
+* `internet_charge_type` - Internet charge type of the ECS instance.
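+
+As a sketch of the `data_disk` block documented above; the size, category and
+device values here are illustrative only:
+
+```
+resource "alicloud_ess_scaling_configuration" "config" {
+  scaling_group_id  = "${alicloud_ess_scaling_group.scaling.id}"
+  image_id          = "ubuntu_140405_64_40G_cloudinit_20161115.vhd"
+  instance_type     = "ecs.s2.large"
+  io_optimized      = "optimized"
+  security_group_id = "${alicloud_security_group.classic.id}"
+
+  data_disk = [{
+    size     = 20
+    category = "cloud"
+    device   = "/dev/xvdb"
+  }]
+}
+```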
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/ess_scaling_group.html.markdown b/website/source/docs/providers/alicloud/r/ess_scaling_group.html.markdown
new file mode 100644
index 000000000..f039c5f19
--- /dev/null
+++ b/website/source/docs/providers/alicloud/r/ess_scaling_group.html.markdown
@@ -0,0 +1,57 @@
+---
+layout: "alicloud"
+page_title: "Alicloud: alicloud_ess_scaling_group"
+sidebar_current: "docs-alicloud-resource-ess-scaling-group"
+description: |-
+  Provides an ESS scaling group resource.
+---
+
+# alicloud\_ess\_scaling\_group
+
+Provides an ESS scaling group resource.
+
+## Example Usage
+
+```
+resource "alicloud_ess_scaling_group" "scaling" {
+  min_size = 1
+  max_size = 2
+  removal_policies = ["OldestInstance", "NewestInstance"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `min_size` - (Required) Minimum number of ECS instances in the scaling group. Value range: [0, 100].
+* `max_size` - (Required) Maximum number of ECS instances in the scaling group. Value range: [0, 100].
+* `scaling_group_name` - (Optional) Name shown for the scaling group, which must contain 2-40 characters (English or Chinese). If this parameter is not specified, the default value is ScalingGroupId.
+* `default_cooldown` - (Optional) Default cool-down time (in seconds) of the scaling group. Value range: [0, 86400]. The default value is 300s.
+* `vswitch_id` - (Optional) The virtual switch ID in which the ECS instances will be created.
+* `removal_policies` - (Optional) RemovalPolicy is used to select the ECS instances you want to remove from the scaling group when multiple candidates for removal exist. Optional values:
+  - OldestInstance: removes the ECS instance that was attached to the scaling group earliest.
+  - NewestInstance: removes the ECS instance that was attached to the scaling group most recently.
+  - OldestScalingConfiguration: removes the ECS instance with the oldest scaling configuration.
+  - Default values: OldestScalingConfiguration and OldestInstance. You can enter up to two removal policies.
+* `db_instance_ids` - (Optional) If an RDS instance is specified in the scaling group, the scaling group automatically attaches the Intranet IP addresses of its ECS instances to the RDS access whitelist.
+  - The specified RDS instance must be in running status.
+  - The specified RDS instance’s whitelist must have room for more IP addresses.
+* `loadbalancer_ids` - (Optional) If a Server Load Balancer instance is specified in the scaling group, the scaling group automatically attaches its ECS instances to the Server Load Balancer instance.
+  - The Server Load Balancer instance must be enabled.
+  - Health check must be enabled for all listener ports configured for the Server Load Balancer instance; otherwise, creation fails.
+  - A Server Load Balancer instance that has VPC-type ECS instances attached cannot be attached to the scaling group.
+  - The default weight of an ECS instance attached to the Server Load Balancer instance is 50.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The scaling group ID.
+* `min_size` - The minimum number of ECS instances.
+* `max_size` - The maximum number of ECS instances.
+* `scaling_group_name` - The name of the scaling group.
+* `default_cooldown` - The default cool-down of the scaling group.
+* `removal_policies` - The removal policy used to select the ECS instance to remove from the scaling group.
+* `db_instance_ids` - The IDs of the attached DB (RDS) instances.
+* `loadbalancer_ids` - The IDs of the attached Server Load Balancer instances.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/ess_scaling_rule.html.markdown b/website/source/docs/providers/alicloud/r/ess_scaling_rule.html.markdown
new file mode 100644
index 000000000..ec24b5067
--- /dev/null
+++ b/website/source/docs/providers/alicloud/r/ess_scaling_rule.html.markdown
@@ -0,0 +1,59 @@
+---
+layout: "alicloud"
+page_title: "Alicloud: alicloud_ess_scaling_rule"
+sidebar_current: "docs-alicloud-resource-ess-scaling-rule"
+description: |-
+  Provides an ESS scaling rule resource.
+---
+
+# alicloud\_ess\_scaling\_rule
+
+Provides an ESS scaling rule resource.
+
+## Example Usage
+
+```
+resource "alicloud_ess_scaling_group" "scaling" {
+  # Other parameters...
+}
+
+resource "alicloud_ess_scaling_configuration" "config" {
+  # Other parameters...
+}
+
+resource "alicloud_ess_scaling_rule" "rule" {
+  scaling_group_id = "${alicloud_ess_scaling_group.scaling.id}"
+  adjustment_type = "TotalCapacity"
+  adjustment_value = 2
+  cooldown = 60
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `scaling_group_id` - (Required) ID of the scaling group of a scaling rule.
+* `adjustment_type` - (Required) Adjustment mode of a scaling rule. Optional values:
+  - QuantityChangeInCapacity: It is used to increase or decrease a specified number of ECS instances.
+  - PercentChangeInCapacity: It is used to increase or decrease a specified proportion of ECS instances.
+  - TotalCapacity: It is used to adjust the quantity of ECS instances in the current scaling group to a specified value.
+* `adjustment_value` - (Required) Adjusted value of a scaling rule. Value range:
+  - QuantityChangeInCapacity:(0, 100] U (-100, 0]
+  - PercentChangeInCapacity:[0, 10000] U [-10000, 0]
+  - TotalCapacity:[0, 100]
+* `scaling_rule_name` - (Optional) Name shown for the scaling rule, which is a string containing 2 to 40 English or Chinese characters.
+* `cooldown` - (Optional) Cool-down time of a scaling rule. Value range: [0, 86400], in seconds. The default value is empty.
+
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The scaling rule ID.
+* `scaling_group_id` - The ID of the scaling group.
+* `ari` - Unique identifier of a scaling rule.
+* `adjustment_type` - Adjustment mode of a scaling rule.
+* `adjustment_value` - Adjustment value of a scaling rule.
+* `scaling_rule_name` - Name of a scaling rule.
+* `cooldown` - Cool-down time of a scaling rule.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/ess_schedule.html.markdown b/website/source/docs/providers/alicloud/r/ess_schedule.html.markdown
new file mode 100644
index 000000000..abe2a298e
--- /dev/null
+++ b/website/source/docs/providers/alicloud/r/ess_schedule.html.markdown
@@ -0,0 +1,65 @@
+---
+layout: "alicloud"
+page_title: "Alicloud: alicloud_ess_schedule"
+sidebar_current: "docs-alicloud-resource-ess-schedule"
+description: |-
+  Provides an ESS schedule resource.
+---
+
+# alicloud\_ess\_schedule
+
+Provides an ESS schedule resource.
+
+## Example Usage
+
+```
+resource "alicloud_ess_scaling_group" "scaling" {
+  # Other parameters...
+}
+
+resource "alicloud_ess_scaling_configuration" "config" {
+  # Other parameters...
+}
+
+resource "alicloud_ess_scaling_rule" "rule" {
+  # Other parameters...
+}
+
+resource "alicloud_ess_schedule" "schedule" {
+  scheduled_action = "${alicloud_ess_scaling_rule.rule.ari}"
+  launch_time = "2017-04-29T07:30Z"
+  scheduled_task_name = "sg-schedule"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `scheduled_action` - (Required) Operations performed when the scheduled task is triggered. Fill in the unique identifier of the scaling rule.
+* `launch_time` - (Required) Time point at which the scheduled task is triggered. The date format follows the ISO8601 standard and uses UTC time, in the format YYYY-MM-DDThh:mmZ.
+* `scheduled_task_name` - (Optional) Display name of the scheduled task, which must be 2-40 characters (English or Chinese) long.
+* `description` - (Optional) Description of the scheduled task, which is 2-200 characters (English or Chinese) long.
+* `launch_expiration_time` - (Optional) Time period within which the failed scheduled task is retried. The default value is 600s. Value range: [0, 21600]
+* `recurrence_type` - (Optional) Type of the scheduled task to be repeated. RecurrenceType, RecurrenceValue and RecurrenceEndTime must be specified. Optional values:
+  - Daily: Recurrence interval by day for a scheduled task.
+  - Weekly: Recurrence interval by week for a scheduled task.
+  - Monthly: Recurrence interval by month for a scheduled task.
+* `recurrence_value` - (Optional) Value of the scheduled task to be repeated. RecurrenceType, RecurrenceValue and RecurrenceEndTime must be specified.
+  - Daily: Only one value in the range [1,31] can be filled.
+  - Weekly: Multiple values can be filled. The values of Sunday to Saturday are 0 to 6 in sequence. Multiple values shall be separated by a comma “,”.
+  - Monthly: In the format of A-B. The value range of A and B is 1 to 31, and the B value must be greater than the A value.
+* `recurrence_end_time` - (Optional) End time of the scheduled task to be repeated. The date format follows the ISO8601 standard and uses UTC time. It is in the format of YYYY-MM-DDThh:mmZ. A time point 90 days after creation or modification cannot be entered. RecurrenceType, RecurrenceValue and RecurrenceEndTime must be specified.
+* `task_enabled` - (Optional) Whether to enable the scheduled task. The default value is true.
+
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The schedule task ID.
+* `scheduled_action` - The action of the schedule task.
+* `launch_time` - The time at which the schedule task is triggered.
+* `scheduled_task_name` - The name of the schedule task.
+* `description` - The description of the schedule task.
+* `task_enabled` - Whether the task is enabled.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/forward.html.markdown b/website/source/docs/providers/alicloud/r/forward.html.markdown
new file mode 100644
index 000000000..6024921a6
--- /dev/null
+++ b/website/source/docs/providers/alicloud/r/forward.html.markdown
@@ -0,0 +1,68 @@
+---
+layout: "alicloud"
+page_title: "Alicloud: alicloud_forward_entry"
+sidebar_current: "docs-alicloud-resource-vpc"
+description: |-
+  Provides an Alicloud forward resource.
+---
+
+# alicloud\_forward
+
+Provides a forward resource.
+
+## Example Usage
+
+Basic Usage
+
+```
+resource "alicloud_vpc" "foo" {
+  ...
+}
+
+resource "alicloud_vswitch" "foo" {
+  ...
+}
+
+resource "alicloud_nat_gateway" "foo" {
+  vpc_id = "${alicloud_vpc.foo.id}"
+  spec = "Small"
+  name = "test_foo"
+
+  bandwidth_packages = [
+    {
+      ip_count = 2
+      bandwidth = 5
+      zone = ""
+    },
+    {
+      ip_count = 1
+      bandwidth = 6
+      zone = "cn-beijing-b"
+    }
+  ]
+
+  depends_on = [
+    "alicloud_vswitch.foo",
+  ]
+}
+
+resource "alicloud_forward_entry" "foo" {
+  forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}"
+  external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
+  external_port = "80"
+  ip_protocol = "tcp"
+  internal_ip = "172.16.0.3"
+  internal_port = "8080"
+}
+
+```
+## Argument Reference
+
+The following arguments are supported:
+
+* `forward_table_id` - (Required, Forces new resource) The forward table ID, which can be obtained from the `alicloud_nat_gateway` attribute `forward_table_ids`.
+* `external_ip` - (Required, Forces new resource) The external IP address. The IP must be one of the bandwidth package public IPs created by the `alicloud_nat_gateway` argument `bandwidth_packages`.
+* `external_port` - (Required) The external port. Valid values: 1~65535 or `any`.
+* `ip_protocol` - (Required) The IP protocol. Valid values: `tcp`, `udp` or `any`.
+* `internal_ip` - (Required) The internal IP, which must be a private IP.
+* `internal_port` - (Required) The internal port. Valid values: 1~65535 or `any`.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/instance.html.markdown b/website/source/docs/providers/alicloud/r/instance.html.markdown
index cb038ed1a..f09473df0 100644
--- a/website/source/docs/providers/alicloud/r/instance.html.markdown
+++ b/website/source/docs/providers/alicloud/r/instance.html.markdown
@@ -12,7 +12,7 @@ Provides a ECS instance resource.
 
 ## Example Usage
 
-```hcl
+```
 # Create a new ECS instance for classic
 resource "alicloud_security_group" "classic" {
   name = "tf_test_foo"
@@ -24,7 +24,7 @@ resource "alicloud_instance" "classic" {
   availability_zone = "cn-beijing-b"
   security_groups = ["${alicloud_security_group.classic.*.id}"]
 
-  allocate_public_ip = "true"
+  allocate_public_ip = true
 
   # series II
   instance_type = "ecs.n1.medium"
@@ -36,11 +36,11 @@ resource "alicloud_instance" "classic" {
 
 # Create a new ECS instance for VPC
 resource "alicloud_vpc" "default" {
-  # ...
+  # Other parameters...
 }
 
 resource "alicloud_vswitch" "default" {
-  # ...
+  # Other parameters...
 }
 
 resource "alicloud_slb" "vpc" {
@@ -59,18 +59,17 @@ The following arguments are supported:
 * `io_optimized` - (Required) Valid values are `none` and `optimized`. If `optimized`, the launched ECS instance will be I/O optimized.
 * `security_groups` - (Optional) A list of security group ids to associate with.
 * `availability_zone` - (Optional) The Zone to start the instance in.
-* `instance_name` - (Optional) The name of the ECS. This instance_name can have a string of 2 to 128 characters, must contain only alphanumeric characters or hyphens, such as "-",".","\_", and must not begin or end with a hyphen, and must not begin with http:// or https://. If not specified,
+* `instance_name` - (Optional) The name of the ECS. This instance_name can have a string of 2 to 128 characters, must contain only alphanumeric characters or hyphens, such as "-",".","_", and must not begin or end with a hyphen, and must not begin with http:// or https://. If not specified,
 Terraform will autogenerate a default name `ECS-Instance`.
 * `allocate_public_ip` - (Optional) Associate a public ip address with an instance in a VPC or Classic. Boolean value, Default is false.
-* `system_disk_category` - (Optional) Valid values are `cloud`, `cloud_efficiency`, `cloud_ssd`, For I/O optimized instance type, `cloud_ssd` and `cloud_efficiency` disks are supported. For non I/O Optimized instance type, `cloud` disk are supported.
+* `system_disk_category` - (Optional) Valid values are `cloud`, `cloud_efficiency` and `cloud_ssd`. For I/O optimized instance types, `cloud_ssd` and `cloud_efficiency` disks are supported. For non-I/O optimized instance types, `cloud` disks are supported.
 * `system_disk_size` - (Optional) Size of the system disk, value range: 40GB ~ 500GB. Default is 40GB.
 * `description` - (Optional) Description of the instance. This description can have a string of 2 to 256 characters. It cannot begin with http:// or https://. Default value is null.
 * `internet_charge_type` - (Optional) Internet charge type of the instance. Valid values are `PayByBandwidth` and `PayByTraffic`. Default is `PayByBandwidth`.
 * `internet_max_bandwidth_in` - (Optional) Maximum incoming bandwidth from the public network, measured in Mbps (Mega bit per second). Value range: [1, 200]. If this value is not specified, it is automatically set to 200 Mbps.
-* `internet_max_bandwidth_out` - (Optional) Maximum outgoing bandwidth to the public network, measured in Mbps (Mega bit per second). Value range:
-`internet_charge_type` is `PayByBandwidth`: this value range [0, 100], If this value is not specified, then automatically sets it to 0 Mbps; If `internet_charge_type` is `PayByTraffic`: this value range [1, 100]. this value must be set value, such as 5.
-* `host_name` - (Optional) Host name of the ECS, which is a string of at least two characters. "hostname” cannot start or end with ".” or "-". In addition, two or more consecutive ".” or "-" symbols are not allowed. On Windows, the host name can contain a maximum of 15 characters, which can be a combination of uppercase/lowercase letters, numerals, and "-". The host name cannot contain dots (".”) or contain only numeric characters.
-On other OSs such as Linux, the host name can contain a maximum of 30 characters, which can be segments separated by dots (".”), where each segment can contain uppercase/lowercase letters, numerals, or "\_".
+* `internet_max_bandwidth_out` - (Optional) Maximum outgoing bandwidth to the public network, measured in Mbps (Mega bit per second). Value range: [0, 100]. If this value is not specified, it is automatically set to 0 Mbps (see the sketch below).
+* `host_name` - (Optional) Host name of the ECS, which is a string of at least two characters. “hostname” cannot start or end with “.” or “-“. In addition, two or more consecutive “.” or “-“ symbols are not allowed. On Windows, the host name can contain a maximum of 15 characters, which can be a combination of uppercase/lowercase letters, numerals, and “-“. The host name cannot contain dots (“.”) or contain only numeric characters.
+On other OSs such as Linux, the host name can contain a maximum of 30 characters, which can be segments separated by dots (“.”), where each segment can contain uppercase/lowercase letters, numerals, or “_“.
 * `password` - (Optional) Password to an instance is a string of 8 to 30 characters. It must contain uppercase/lowercase letters and numerals, but cannot contain special symbols.
 * `vswitch_id` - (Optional) The virtual switch ID to launch in VPC. If you want to create instances in VPC network, this parameter must be set.
 * `instance_charge_type` - (Optional) Valid values are `PrePaid` and `PostPaid`. The default is `PostPaid`.
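+
+As an illustration of the range above, a minimal sketch of a classic-network instance that explicitly disables public outbound bandwidth; the image, type and zone values are placeholders, not defaults:
+
+```
+resource "alicloud_instance" "no_public_egress" {
+  image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd"
+  instance_type = "ecs.n1.medium"
+  io_optimized = "optimized"
+  availability_zone = "cn-beijing-b"
+  security_groups = ["${alicloud_security_group.classic.id}"]
+
+  # With PayByBandwidth the allowed range is [0, 100]; 0 turns public egress off.
+  internet_charge_type = "PayByBandwidth"
+  internet_max_bandwidth_out = 0
+}
+```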
@@ -94,4 +93,4 @@ The following attributes are exported:
 * `private_ip` - The instance private ip.
 * `public_ip` - The instance public ip.
 * `vswitch_id` - If the instance is created in a VPC, this value is the virtual switch ID.
-* `tags` - The instance tags, use jsonencode(item) to display the value.
+* `tags` - The instance tags, use jsonencode(item) to display the value.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/nat_gateway.html.markdown b/website/source/docs/providers/alicloud/r/nat_gateway.html.markdown
index 659bd7d1f..33ceb6e9b 100644
--- a/website/source/docs/providers/alicloud/r/nat_gateway.html.markdown
+++ b/website/source/docs/providers/alicloud/r/nat_gateway.html.markdown
@@ -6,15 +6,18 @@ description: |-
   Provides a resource to create a VPC NAT Gateway.
 ---
 
-# alicloud_nat_gateway
+# alicloud\_nat\_gateway
 
 Provides a resource to create a VPC NAT Gateway.
 
+~> **NOTE:** alicloud_nat_gateway must depend on alicloud_vswitch.
+
+
 ## Example Usage
 
 Basic usage
 
-```hcl
+```
 resource "alicloud_vpc" "vpc" {
   name = "tf_test_foo"
   cidr_block = "172.16.0.0/12"
@@ -31,12 +34,11 @@ resource "alicloud_nat_gateway" "nat_gateway" {
   spec = "Small"
   name = "test_foo"
 
-  bandwidth_packages = [
-    {
-      ip_count = 1
-      bandwidth = 5
-      zone = "cn-beijing-b"
-    },
+  bandwidth_packages = [{
+    ip_count = 1
+    bandwidth = 5
+    zone = "cn-beijing-b"
+  },
     {
       ip_count = 2
       bandwidth = 10
@@ -56,7 +58,7 @@ The following arguments are supported:
 * `vpc_id` - (Required, Forces New Resource) The VPC ID.
 * `spec` - (Required, Forces New Resource) The specification of the nat gateway. Valid values are `Small`, `Middle` and `Large`. Details refer to [Nat Gateway Specification](https://help.aliyun.com/document_detail/42757.html?spm=5176.doc32322.6.559.kFNBzv)
-* `name` - (Optional) Name of the nat gateway. The value can have a string of 2 to 128 characters, must contain only alphanumeric characters or hyphens, such as "-",".","\_", and must not begin or end with a hyphen, and must not begin with http:// or https://. Defaults to null.
+* `name` - (Optional) Name of the nat gateway. The value can have a string of 2 to 128 characters, must contain only alphanumeric characters or hyphens, such as "-",".","_", and must not begin or end with a hyphen, and must not begin with http:// or https://. Defaults to null.
 * `description` - (Optional) Description of the nat gateway. This description can have a string of 2 to 256 characters. It cannot begin with http:// or https://. Defaults to null.
 * `bandwidth_packages` - (Required) A list of bandwidth packages for the nat gateway.
 
@@ -67,6 +69,7 @@ The bandwidth package mapping supports the following:
 * `ip_count` - (Required) The IP number of the current bandwidth package. Its value ranges from 1 to 50.
 * `bandwidth` - (Required) The bandwidth value of the current bandwidth package. Its value ranges from 5 to 5000.
 * `zone` - (Optional) The AZ for the current bandwidth. If this value is not specified, Terraform will set a random AZ.
+* `public_ip_addresses` - (Computed) The public IPs of the bandwidth package. The number of public IPs equals `ip_count`; multiple IPs are joined with ",", such as "10.0.0.1,10.0.0.2".
 
 ## Attributes Reference
 
@@ -78,3 +81,5 @@ The following attributes are exported:
 * `spec` - The specification of the nat gateway.
 * `vpc_id` - The VPC ID for the nat gateway.
 * `bandwidth_package_ids` - A list of bandwidth package IDs, separated by commas.
+* `snat_table_ids` - The NAT gateway automatically creates a SNAT table and a forward table; `snat_table_ids` is the ID of the created SNAT table.
+* `forward_table_ids` - The NAT gateway automatically creates a SNAT table and a forward table; `forward_table_ids` is the ID of the created forward table.
diff --git a/website/source/docs/providers/alicloud/r/security_group.html.markdown b/website/source/docs/providers/alicloud/r/security_group.html.markdown
index 7825d37ae..c25dbe808 100644
--- a/website/source/docs/providers/alicloud/r/security_group.html.markdown
+++ b/website/source/docs/providers/alicloud/r/security_group.html.markdown
@@ -1,12 +1,12 @@
 ---
 layout: "alicloud"
 page_title: "Alicloud: alicloud_security_group"
-sidebar_current: "docs-alicloud-resource-security-group."
+sidebar_current: "docs-alicloud-resource-security-group"
 description: |-
   Provides an Alicloud Security Group resource.
 ---
 
-# alicloud_security_group
+# alicloud\_security\_group
 
 Provides a security group resource.
@@ -16,16 +16,15 @@ Provides a security group resource.
 
 Basic Usage
 
-```hcl
+```
 resource "alicloud_security_group" "group" {
   name = "terraform-test-group"
   description = "New security group"
 }
 ```
-
 Basic usage for vpc
 
-```hcl
+```
 resource "alicloud_security_group" "group" {
   name = "new-group"
   vpc_id = "${alicloud_vpc.vpc.id}"
@@ -51,4 +50,4 @@ The following attributes are exported:
 * `id` - The ID of the security group
 * `vpc_id` - The VPC ID.
 * `name` - The name of the security group
-* `description` - The description of the security group
+* `description` - The description of the security group
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/security_group_rule.html.markdown b/website/source/docs/providers/alicloud/r/security_group_rule.html.markdown
index 2b58aa50f..feaab5b06 100644
--- a/website/source/docs/providers/alicloud/r/security_group_rule.html.markdown
+++ b/website/source/docs/providers/alicloud/r/security_group_rule.html.markdown
@@ -6,7 +6,7 @@ description: |-
   Provides an Alicloud Security Group Rule resource.
 ---
 
-# alicloud_security_group_rule
+# alicloud\_security\_group\_rule
 
 Provides a security group rule resource.
 Represents a single `ingress` or `egress` group rule, which can be added to external Security Groups.
@@ -18,7 +18,7 @@ Represents a single `ingress` or `egress` group rule, which can be added to exte
 
 Basic Usage
 
-```hcl
+```
 resource "alicloud_security_group" "default" {
   name = "default"
 }
@@ -58,4 +58,4 @@ The following attributes are exported:
 * `type` - The type of rule, `ingress` or `egress`
 * `name` - The name of the security group
 * `port_range` - The range of port numbers
-* `ip_protocol` - The protocol of the security group rule
+* `ip_protocol` - The protocol of the security group rule
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/slb.html.markdown b/website/source/docs/providers/alicloud/r/slb.html.markdown
index b227bc75b..7feb52490 100644
--- a/website/source/docs/providers/alicloud/r/slb.html.markdown
+++ b/website/source/docs/providers/alicloud/r/slb.html.markdown
@@ -1,18 +1,18 @@
 ---
 layout: "alicloud"
 page_title: "Alicloud: alicloud_slb"
-sidebar_current: "docs-alicloud-resource-slb."
+sidebar_current: "docs-alicloud-resource-slb"
 description: |-
   Provides an Application Load Balancer resource.
 ---
 
-# alicloud_slb
+# alicloud\_slb
 
 Provides an Application Load Balancer resource.
 ## Example Usage
 
-```hcl
+```
 # Create a new load balancer for classic
 resource "alicloud_slb" "classic" {
   name = "test-slb-tf"
@@ -44,11 +44,11 @@ resource "alicloud_slb" "classic" {
 
 # Create a new load balancer for VPC
 resource "alicloud_vpc" "default" {
-  # ...
+  # Other parameters...
 }
 
 resource "alicloud_vswitch" "default" {
-  # ...
+  # Other parameters...
 }
 
 resource "alicloud_slb" "vpc" {
@@ -62,7 +62,7 @@ The following arguments are supported:
 
 * `name` - (Optional) The name of the SLB. This name must be unique within your AliCloud account, can have a maximum of 80 characters,
-must contain only alphanumeric characters or hyphens, such as "-","/",".","\_", and must not begin or end with a hyphen. If not specified,
+must contain only alphanumeric characters or hyphens, such as "-","/",".","_", and must not begin or end with a hyphen. If not specified,
 Terraform will autogenerate a name beginning with `tf-lb`.
 * `internet` - (Optional, Forces New Resource) If true, the SLB addressType will be internet, false will be intranet, Default is false. If the load balancer is launched in a VPC, this value must be "false".
 * `internet_charge_type` - (Optional, Forces New Resource) Valid
@@ -74,12 +74,59 @@ Terraform will autogenerate a name beginning with `tf-lb`.
 
 ## Block listener
 
+The load balancer can listen on four protocols: `http`, `https`, `tcp` and `udp`. The parameters supported by each listener protocol are shown below; a worked example follows the parameter list.
+
+listener parameter | support protocol | value range |
+------------- | ------------- | ------------- |
+instance_port | http & https & tcp & udp | 1-65535 |
+lb_port | http & https & tcp & udp | 1-65535 |
+lb_protocol | http & https & tcp & udp | |
+bandwidth | http & https & tcp & udp | -1 / 1-1000 |
+scheduler | http & https & tcp & udp | wrr or wlc |
+sticky_session | http & https | on or off |
+sticky_session_type | http & https | insert or server |
+cookie_timeout | http & https | 1-86400 |
+cookie | http & https | |
+persistence_timeout | tcp & udp | 0-3600 |
+health_check | http & https | on or off |
+health_check_type | tcp | tcp or http |
+health_check_domain | http & https & tcp | |
+health_check_uri | http & https & tcp | |
+health_check_connect_port | http & https & tcp & udp | 1-65535 or -520 |
+healthy_threshold | http & https & tcp & udp | 1-10 |
+unhealthy_threshold | http & https & tcp & udp | 1-10 |
+health_check_timeout | http & https & tcp & udp | 1-50 |
+health_check_interval | http & https & tcp & udp | 1-5 |
+health_check_http_code | http & https & tcp | http_2xx,http_3xx,http_4xx,http_5xx |
+ssl_certificate_id | https | |
+
+
 The listener mapping supports the following:
 
 * `instance_port` - (Required) The port on which the backend servers are listening. Valid value is between 1 to 65535.
 * `lb_port` - (Required) The port on which the load balancer is listening. Valid value is between 1 to 65535.
 * `lb_protocol` - (Required) The protocol to listen on. Valid values are `http`, `tcp` and `udp`.
 * `bandwidth` - (Required) The bandwidth on which the load balancer is listening. Valid value is -1 or between 1 and 1000. If -1, the bandwidth has no upper limit.
+* `scheduler` - (Optional) Scheduling algorithm. Valid values are `wrr` / `wlc`. Default is "wrr".
+* `sticky_session` - (Optional) Whether to enable session persistence. Value: `on` / `off`.
+* `sticky_session_type` - (Optional) Mode for handling the cookie. If "sticky_session" is on, the parameter is mandatory, and if "sticky_session" is off, the parameter will be ignored. Value:`insert` / `server`. If it is set to insert, the cookie is inserted by Server Load Balancer; and if it is set to server, the Server Load Balancer learns the cookie from the backend server.
+* `cookie_timeout` - (Optional) The parameter is mandatory when "sticky_session" is on and "sticky_session_type" is insert. Otherwise, it will be ignored. Value: 1-86400 (in seconds)
+* `cookie` - (Optional) The cookie configured on the server.
+It is mandatory only when "sticky_session" is on and "sticky_session_type" is server; otherwise, the parameter will be ignored. Value: String in line with RFC 2965, with length being 1-200. It may only contain ASCII characters, English letters and digits, not commas, semicolons or spaces, and it cannot start with $.
+* `persistence_timeout` - (Optional) Timeout of connection persistence. Value: 0-3600 (in seconds). Default: 0. The value 0 disables it.
+* `health_check` - (Optional) Whether to enable health check. Value: `on` / `off`
+* `health_check_type` - (Optional) Type of health check. Value: `tcp` | `http`. Default: `tcp`. TCP supports TCP and HTTP health check modes; you can select the particular mode depending on your application.
+* `health_check_domain` - (Optional) Domain name used for health check. When a TCP listener needs to use HTTP health check, this parameter will be configured; and when TCP health check is used, the parameter will be ignored. Value: `$_ip | custom string`. Rules of the custom string: its length is limited to 1-80 and only characters such as letters, digits, ‘-‘ and ‘.’ are allowed. When the parameter is set to $_ip by the user, Server Load Balancer uses the private network IP address of each backend server as the Domain used for health check.
+* `health_check_uri` - (Optional) URI used for health check. When a TCP listener needs to use HTTP health check, this parameter will be configured; and when TCP health check is used, the parameter will be ignored.
+Value: its length is limited to 1-80 and it must start with /. Only characters such as letters, digits, ‘-’, ‘/’, ‘.’, ‘%’, ‘?’, ‘#’ and ‘&’ are allowed.
+* `health_check_connect_port` - (Optional) Port used for health check. Value: `1-65535`. Default: None. When the parameter is not set, it means the backend server port is used (BackendServerPort).
+* `healthy_threshold` - (Optional) Threshold at which the health check result is determined to be success. Value: `1-10`. Default: 3.
+* `unhealthy_threshold` - (Optional) Threshold at which the health check result is determined to be fail. Value: `1-10`. Default: 3.
+* `health_check_timeout` - (Optional) Maximum timeout of each health check response. When "health_check" is on, the parameter is mandatory; and when "health_check" is off, the parameter will be ignored. Value: `1-50` (in seconds). Note: If health_check_timeout < health_check_interval, health_check_timeout is invalid, and the timeout is health_check_interval.
+* `health_check_interval` - (Optional) Time interval of health checks.
+When "health_check" is on, the parameter is mandatory; and when "health_check" is off, the parameter will be ignored. Value: `1-5` (in seconds)
+* `health_check_http_code` - (Optional) HTTP status codes indicating a healthy result. Multiple codes are separated by “,”. When "health_check" is on, the parameter is mandatory; and when "health_check" is off, the parameter will be ignored. Value: `http_2xx` / `http_3xx` / `http_4xx` / `http_5xx`.
+* `ssl_certificate_id` - (Optional) Security certificate ID.
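+
+A minimal sketch of an `http` listener with session persistence and health checks enabled. All values below are illustrative rather than defaults, and the exact `listener` block syntax may vary slightly across provider versions:
+
+```
+resource "alicloud_slb" "web" {
+  name = "test-slb-listener"
+  internet = true
+
+  listener = [
+    {
+      instance_port = 8080
+      lb_port = 80
+      lb_protocol = "http"
+      bandwidth = 10
+
+      # Session persistence via a load-balancer-inserted cookie.
+      sticky_session = "on"
+      sticky_session_type = "insert"
+      cookie_timeout = 86400
+
+      # Health check against an assumed /health endpoint on the backends.
+      health_check = "on"
+      health_check_uri = "/health"
+      healthy_threshold = 3
+      unhealthy_threshold = 3
+      health_check_timeout = 5
+      health_check_interval = 2
+      health_check_http_code = "http_2xx"
+    },
+  ]
+}
+```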
 
 ## Attributes Reference
 
@@ -91,4 +138,4 @@ The following attributes are exported:
 * `internet_charge_type` - The internet_charge_type of the load balancer.
 * `bandwidth` - The bandwidth of the load balancer.
 * `vswitch_id` - The VSwitch ID of the load balancer. Only available on SLB launched in a VPC.
-* `address` - The IP address of the load balancer.
+* `address` - The IP address of the load balancer.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/slb_attachment.html.markdown b/website/source/docs/providers/alicloud/r/slb_attachment.html.markdown
index 6ae4c1c58..ce3d7ac39 100644
--- a/website/source/docs/providers/alicloud/r/slb_attachment.html.markdown
+++ b/website/source/docs/providers/alicloud/r/slb_attachment.html.markdown
@@ -6,20 +6,20 @@ description: |-
   Provides an Application Load Balancer Attachment resource.
 ---
 
-# alicloud_slb_attachment
+# alicloud\_slb\_attachment
 
 Provides an Application Load Balancer Attachment resource.
 
 ## Example Usage
 
-```hcl
+```
 # Create a new load balancer attachment for classic
 resource "alicloud_slb" "default" {
-  # ...
+  # Other parameters...
 }
 
 resource "alicloud_instance" "default" {
-  # ...
+  # Other parameters...
 }
 
 resource "alicloud_slb_attachment" "default" {
diff --git a/website/source/docs/providers/alicloud/r/snat.html.markdown b/website/source/docs/providers/alicloud/r/snat.html.markdown
new file mode 100644
index 000000000..f39549387
--- /dev/null
+++ b/website/source/docs/providers/alicloud/r/snat.html.markdown
@@ -0,0 +1,61 @@
+---
+layout: "alicloud"
+page_title: "Alicloud: alicloud_snat_entry"
+sidebar_current: "docs-alicloud-resource-vpc"
+description: |-
+  Provides an Alicloud SNAT resource.
+---
+
+# alicloud\_snat
+
+Provides a SNAT resource.
+
+## Example Usage
+
+Basic Usage
+
+```
+resource "alicloud_vpc" "foo" {
+  ...
+}
+
+resource "alicloud_vswitch" "foo" {
+  ...
+}
+
+resource "alicloud_nat_gateway" "foo" {
+  vpc_id = "${alicloud_vpc.foo.id}"
+  spec = "Small"
+  name = "test_foo"
+
+  bandwidth_packages = [
+    {
+      ip_count = 2
+      bandwidth = 5
+      zone = ""
+    },
+    {
+      ip_count = 1
+      bandwidth = 6
+      zone = "cn-beijing-b"
+    }
+  ]
+
+  depends_on = [
+    "alicloud_vswitch.foo"
+  ]
+}
+
+resource "alicloud_snat_entry" "foo" {
+  snat_table_id = "${alicloud_nat_gateway.foo.snat_table_ids}"
+  source_vswitch_id = "${alicloud_vswitch.foo.id}"
+  snat_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
+}
+```
+## Argument Reference
+
+The following arguments are supported:
+
+* `snat_table_id` - (Required, Forces new resource) The SNAT table ID, which can be obtained from the `alicloud_nat_gateway` attribute `snat_table_ids`.
+* `source_vswitch_id` - (Required, Forces new resource) The vswitch ID.
+* `snat_ip` - (Required) The SNAT IP address. The IP must be one of the bandwidth package public IPs created by the `alicloud_nat_gateway` argument `bandwidth_packages`.
diff --git a/website/source/docs/providers/alicloud/r/vpc.html.markdown b/website/source/docs/providers/alicloud/r/vpc.html.markdown
index b7dbc644b..f464326d8 100644
--- a/website/source/docs/providers/alicloud/r/vpc.html.markdown
+++ b/website/source/docs/providers/alicloud/r/vpc.html.markdown
@@ -16,13 +16,12 @@ Provides a VPC resource.
 
 Basic Usage
 
-```hcl
+```
 resource "alicloud_vpc" "vpc" {
   name = "tf_test_foo"
   cidr_block = "172.16.0.0/12"
 }
 ```
-
 ## Argument Reference
 
 The following arguments are supported:
diff --git a/website/source/docs/providers/alicloud/r/vroute_entry.html.markdown b/website/source/docs/providers/alicloud/r/vroute_entry.html.markdown
index a684d1781..adca830be 100644
--- a/website/source/docs/providers/alicloud/r/vroute_entry.html.markdown
+++ b/website/source/docs/providers/alicloud/r/vroute_entry.html.markdown
@@ -14,7 +14,7 @@ Provides a route entry resource.
 
 Basic Usage
 
-```hcl
+```
 resource "alicloud_vpc" "vpc" {
   name = "tf_test_foo"
   cidr_block = "172.16.0.0/12"
@@ -29,7 +29,7 @@ resource "alicloud_route_entry" "default" {
 }
 
 resource "alicloud_instance" "snat" {
-  # ...
+  // ...
 }
 ```
 ## Argument Reference
diff --git a/website/source/docs/providers/alicloud/r/vswitch.html.markdown b/website/source/docs/providers/alicloud/r/vswitch.html.markdown
index 335f5162b..8f6bbdb2f 100644
--- a/website/source/docs/providers/alicloud/r/vswitch.html.markdown
+++ b/website/source/docs/providers/alicloud/r/vswitch.html.markdown
@@ -6,7 +6,7 @@ description: |-
   Provides an Alicloud VPC switch resource.
 ---
 
-# alicloud_vswitch
+# alicloud\_vswitch
 
 Provides a VPC switch resource.
@@ -14,7 +14,7 @@ Provides a VPC switch resource.
 
 Basic Usage
 
-```hcl
+```
 resource "alicloud_vpc" "vpc" {
   name = "tf_test_foo"
   cidr_block = "172.16.0.0/12"
diff --git a/website/source/layouts/alicloud.erb b/website/source/layouts/alicloud.erb
index f582d8324..f6695b85c 100644
--- a/website/source/layouts/alicloud.erb
+++ b/website/source/layouts/alicloud.erb
@@ -1,75 +1,122 @@
 <% wrap_layout :inner do %>
-  <% content_for :sidebar do %>
-
+
+  <% end %>
+
+  <%= yield %>
 <% end %>

From ec85f833c7c572652572669ccb39d6e72899d936 Mon Sep 17 00:00:00 2001
From: Paul Stack
Date: Tue, 18 Apr 2017 15:22:03 +0300
Subject: [PATCH 188/342] Update CHANGELOG.md

---
 CHANGELOG.md | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 858382729..bc3666ccd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,13 @@ FEATURES:
 * **New Data Source:** `google_compute_network` [GH-12442]
 * **New Data Source:** `google_compute_subnetwork` [GH-12442]
 * **New Resource:** `local_file` for creating local files (please see the docs for caveats) [GH-12757]
+* **New Resource:** `alicloud_ess_scalinggroup` [GH-13731]
+* **New Resource:** `alicloud_ess_scalingconfiguration` [GH-13731]
+* **New Resource:** `alicloud_ess_scalingrule` [GH-13731]
+* **New Resource:** `alicloud_ess_schedule` [GH-13731]
+* **New Resource:** `alicloud_snat_entry` [GH-13731]
+* **New Resource:** `alicloud_forward_entry` [GH-13731]
+
 IMPROVEMENTS:
 
 * state/remote/swift: Support Openstack request logging [GH-13583]
@@ -21,6 +28,8 @@ BUG FIXES:
 * core: Fix a crash condition by improving the flatmap.Expand() logic [GH-13541]
 * provider/alicloud: Fix create PrePaid instance [GH-13662]
 * provider/alicloud: Fix allocate public ip error [GH-13268]
+ * provider/alicloud: alicloud_security_group_rule: check ptr before using it [GH-13731]
+ * provider/alicloud: alicloud_instance: fix ecs internet_max_bandwidth_out cannot set zero bug [GH-13731]
 * provider/aws: Fix DB Parameter Group Name [GH-13279]
 * provider/aws: Increase default number of retries from 11 to 25 [GH-13673]
 * provider/aws: Use mutex & retry for WAF change operations [GH-13656]

From 47d255f943f26790f88664f2d8b91f16929a60b0 Mon Sep 17 00:00:00 2001
From: KOJIMA Kazunori
Date: Tue, 18 Apr 2017 21:29:14 +0900
Subject: [PATCH 189/342] provider/aws: Add aws_kms_alias datasource (#13669)

---
 .../aws/data_source_aws_kms_alias.go          | 62 +++++++++++++++
 .../aws/data_source_aws_kms_alias_test.go     | 77 +++++++++++++++++++
 builtin/providers/aws/provider.go             |  1 +
 .../providers/aws/resource_aws_kms_alias.go   |  9 +--
 builtin/providers/aws/validators.go           |  9 +++
 builtin/providers/aws/validators_test.go      | 32 ++++++++
 .../providers/aws/d/kms_alias.html.markdown   | 30 ++++++++
 7 files changed, 212 insertions(+), 8 deletions(-)
 create mode 100644 builtin/providers/aws/data_source_aws_kms_alias.go
 create mode 100644 builtin/providers/aws/data_source_aws_kms_alias_test.go
 create mode 100644 website/source/docs/providers/aws/d/kms_alias.html.markdown

diff --git a/builtin/providers/aws/data_source_aws_kms_alias.go b/builtin/providers/aws/data_source_aws_kms_alias.go
new file mode 100644
index 000000000..41c33b680
--- /dev/null
+++ b/builtin/providers/aws/data_source_aws_kms_alias.go
@@ -0,0 +1,62 @@
+package aws
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/kms"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceAwsKmsAlias() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsKmsAliasRead,
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validateAwsKmsName,
+			},
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"target_key_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceAwsKmsAliasRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).kmsconn
+	params := &kms.ListAliasesInput{}
+
+	target := d.Get("name")
+	var alias *kms.AliasListEntry
+	err := conn.ListAliasesPages(params, func(page *kms.ListAliasesOutput, lastPage bool) bool {
+		for _, entity := range page.Aliases {
+			if *entity.AliasName == target {
+				alias = entity
+				return false
+			}
+		}
+
+		return true
+	})
+	if err != nil {
+		return errwrap.Wrapf("Error fetching KMS alias list: {{err}}", err)
+	}
+
+	if alias == nil {
+		return fmt.Errorf("No alias with name %q found in this region.", target)
+	}
+
+	d.SetId(time.Now().UTC().String())
+	d.Set("arn", alias.AliasArn)
+	d.Set("target_key_id", alias.TargetKeyId)
+
+	return nil
+}
diff --git a/builtin/providers/aws/data_source_aws_kms_alias_test.go b/builtin/providers/aws/data_source_aws_kms_alias_test.go
new file mode 100644
index 000000000..c498d5168
--- /dev/null
+++ b/builtin/providers/aws/data_source_aws_kms_alias_test.go
@@ -0,0 +1,77 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccDataSourceAwsKmsAlias(t *testing.T) {
+	rInt := acctest.RandInt()
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccDataSourceAwsKmsAlias(rInt),
+				Check: resource.ComposeTestCheckFunc(
+					testAccDataSourceAwsKmsAliasCheck("data.aws_kms_alias.by_name"),
+				),
+			},
+		},
+	})
+}
+
+func testAccDataSourceAwsKmsAliasCheck(name string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[name]
+		if !ok {
+			return fmt.Errorf("root module has no resource called %s", name)
+		}
+
+		kmsKeyRs, ok := s.RootModule().Resources["aws_kms_alias.single"]
+		if !ok {
+			return fmt.Errorf("can't find aws_kms_alias.single in state")
fmt.Errorf("can't find aws_kms_alias.single in state") + } + + attr := rs.Primary.Attributes + + if attr["arn"] != kmsKeyRs.Primary.Attributes["arn"] { + return fmt.Errorf( + "arn is %s; want %s", + attr["arn"], + kmsKeyRs.Primary.Attributes["arn"], + ) + } + + if attr["target_key_id"] != kmsKeyRs.Primary.Attributes["target_key_id"] { + return fmt.Errorf( + "target_key_id is %s; want %s", + attr["target_key_id"], + kmsKeyRs.Primary.Attributes["target_key_id"], + ) + } + + return nil + } +} + +func testAccDataSourceAwsKmsAlias(rInt int) string { + return fmt.Sprintf(` +resource "aws_kms_key" "one" { + description = "Terraform acc test" + deletion_window_in_days = 7 +} + +resource "aws_kms_alias" "single" { + name = "alias/tf-acc-key-alias-%d" + target_key_id = "${aws_kms_key.one.key_id}" +} + +data "aws_kms_alias" "by_name" { + name = "${aws_kms_alias.single.name}" +}`, rInt) +} diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index b1f9c2bf4..28a9443e6 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -185,6 +185,7 @@ func Provider() terraform.ResourceProvider { "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), "aws_instance": dataSourceAwsInstance(), "aws_ip_ranges": dataSourceAwsIPRanges(), + "aws_kms_alias": dataSourceAwsKmsAlias(), "aws_kms_secret": dataSourceAwsKmsSecret(), "aws_partition": dataSourceAwsPartition(), "aws_prefix_list": dataSourceAwsPrefixList(), diff --git a/builtin/providers/aws/resource_aws_kms_alias.go b/builtin/providers/aws/resource_aws_kms_alias.go index 64eec56a6..b02ffefba 100644 --- a/builtin/providers/aws/resource_aws_kms_alias.go +++ b/builtin/providers/aws/resource_aws_kms_alias.go @@ -29,14 +29,7 @@ func resourceAwsKmsAlias() *schema.Resource { Optional: true, ForceNew: true, ConflictsWith: []string{"name_prefix"}, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !regexp.MustCompile(`^(alias\/)[a-zA-Z0-9:/_-]+$`).MatchString(value) { - es = append(es, fmt.Errorf( - "%q must begin with 'alias/' and be comprised of only [a-zA-Z0-9:/_-]", k)) - } - return - }, + ValidateFunc: validateAwsKmsName, }, "name_prefix": &schema.Schema{ Type: schema.TypeString, diff --git a/builtin/providers/aws/validators.go b/builtin/providers/aws/validators.go index dced0935d..9a7bf0e0a 100644 --- a/builtin/providers/aws/validators.go +++ b/builtin/providers/aws/validators.go @@ -1209,3 +1209,12 @@ func validateOpenIdURL(v interface{}, k string) (ws []string, errors []error) { } return } + +func validateAwsKmsName(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if !regexp.MustCompile(`^(alias\/)[a-zA-Z0-9:/_-]+$`).MatchString(value) { + es = append(es, fmt.Errorf( + "%q must begin with 'alias/' and be comprised of only [a-zA-Z0-9:/_-]", k)) + } + return +} diff --git a/builtin/providers/aws/validators_test.go b/builtin/providers/aws/validators_test.go index 06c225cac..4638f0ba0 100644 --- a/builtin/providers/aws/validators_test.go +++ b/builtin/providers/aws/validators_test.go @@ -1981,3 +1981,35 @@ func TestValidateOpenIdURL(t *testing.T) { } } } + +func TestValidateAwsKmsName(t *testing.T) { + cases := []struct { + Value string + ErrCount int + }{ + { + Value: "alias/aws/s3", + ErrCount: 0, + }, + { + Value: "alias/hashicorp", + ErrCount: 0, + }, + { + Value: "hashicorp", + ErrCount: 1, + }, + { + Value: "hashicorp/terraform", + ErrCount: 1, + }, + } + + for _, tc := range cases { + _, errors := 
+		if len(errors) != tc.ErrCount {
+			t.Fatalf("AWS KMS Alias Name validation failed: %v", errors)
+		}
+	}
+
+}
diff --git a/website/source/docs/providers/aws/d/kms_alias.html.markdown b/website/source/docs/providers/aws/d/kms_alias.html.markdown
new file mode 100644
index 000000000..b37e77488
--- /dev/null
+++ b/website/source/docs/providers/aws/d/kms_alias.html.markdown
@@ -0,0 +1,30 @@
+---
+layout: "aws"
+page_title: "AWS: aws_kms_alias"
+sidebar_current: "docs-aws-datasource-kms-alias"
+description: |-
+  Get information on an AWS Key Management Service (KMS) Alias
+---
+
+# aws\_kms\_alias
+
+Use this data source to get the ARN of a KMS key alias.
+By using this data source, you can reference the key alias
+without having to hard-code the ARN as input.
+
+## Example Usage
+
+```hcl
+data "aws_kms_alias" "s3" {
+  name = "alias/aws/s3"
+}
+```
+
+## Argument Reference
+
+* `name` - (Required) The display name of the alias. The name must start with the word "alias" followed by a forward slash (alias/).
+
+## Attributes Reference
+
+* `arn` - The Amazon Resource Name (ARN) of the key alias.
+* `target_key_id` - Key identifier pointed to by the alias.

From fd1b430e5f8f3deeb59082a8f50fecf6953a82ec Mon Sep 17 00:00:00 2001
From: Paul Stack
Date: Tue, 18 Apr 2017 15:29:46 +0300
Subject: [PATCH 190/342] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bc3666ccd..af1a69ed6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@
 
 FEATURES:
 
+* **New Data Source:** `aws_kms_alias` [GH-13669]
 * **New Data Source:** `google_compute_network` [GH-12442]
 * **New Data Source:** `google_compute_subnetwork` [GH-12442]
 * **New Resource:** `local_file` for creating local files (please see the docs for caveats) [GH-12757]

From 1b841e77debca2425da2731fb0452c1e85a75545 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Tue, 18 Apr 2017 15:59:44 +0100
Subject: [PATCH 191/342] provider/aws: Run all AWSConfig acc tests sequentially (#13658)

---
 .../resource_aws_config_config_rule_test.go   | 10 ++---
 ...nfig_configuration_recorder_status_test.go |  6 +--
 ..._aws_config_configuration_recorder_test.go |  6 +--
 ...source_aws_config_delivery_channel_test.go |  6 +--
 .../providers/aws/resource_aws_config_test.go | 44 +++++++++++++++++++
 5 files changed, 58 insertions(+), 14 deletions(-)
 create mode 100644 builtin/providers/aws/resource_aws_config_test.go

diff --git a/builtin/providers/aws/resource_aws_config_config_rule_test.go b/builtin/providers/aws/resource_aws_config_config_rule_test.go
index 6540fc5de..42f3047bd 100644
--- a/builtin/providers/aws/resource_aws_config_config_rule_test.go
+++ b/builtin/providers/aws/resource_aws_config_config_rule_test.go
@@ -12,7 +12,7 @@ import (
 	"github.com/hashicorp/terraform/terraform"
 )
 
-func TestAccAWSConfigConfigRule_basic(t *testing.T) {
+func testAccConfigConfigRule_basic(t *testing.T) {
 	var cr configservice.ConfigRule
 	rInt := acctest.RandInt()
 	expectedName := fmt.Sprintf("tf-acc-test-%d", rInt)
@@ -37,7 +37,7 @@ func TestAccAWSConfigConfigRule_basic(t *testing.T) {
 	})
 }
 
-func TestAccAWSConfigConfigRule_ownerAws(t *testing.T) {
+func testAccConfigConfigRule_ownerAws(t *testing.T) {
 	var cr configservice.ConfigRule
 	rInt := acctest.RandInt()
 	expectedName := fmt.Sprintf("tf-acc-test-%d", rInt)
@@ -72,7 +72,7 @@ func TestAccAWSConfigConfigRule_ownerAws(t *testing.T) {
 	})
 }
 
-func TestAccAWSConfigConfigRule_customlambda(t *testing.T) {
+func testAccConfigConfigRule_customlambda(t *testing.T) {
 	var cr configservice.ConfigRule
 	rInt := acctest.RandInt()
@@ -113,7 +113,7 @@ func TestAccAWSConfigConfigRule_customlambda(t *testing.T) {
 	})
 }
 
-func TestAccAWSConfigConfigRule_importAws(t *testing.T) {
+func testAccConfigConfigRule_importAws(t *testing.T) {
 	resourceName := "aws_config_config_rule.foo"
 	rInt := acctest.RandInt()
@@ -135,7 +135,7 @@ func TestAccAWSConfigConfigRule_importAws(t *testing.T) {
 	})
 }
 
-func TestAccAWSConfigConfigRule_importLambda(t *testing.T) {
+func testAccConfigConfigRule_importLambda(t *testing.T) {
 	resourceName := "aws_config_config_rule.foo"
 	rInt := acctest.RandInt()
diff --git a/builtin/providers/aws/resource_aws_config_configuration_recorder_status_test.go b/builtin/providers/aws/resource_aws_config_configuration_recorder_status_test.go
index 3967bcb3a..ded0d16b4 100644
--- a/builtin/providers/aws/resource_aws_config_configuration_recorder_status_test.go
+++ b/builtin/providers/aws/resource_aws_config_configuration_recorder_status_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/hashicorp/terraform/terraform"
 )
 
-func TestAccAWSConfigConfigurationRecorderStatus_basic(t *testing.T) {
+func testAccConfigConfigurationRecorderStatus_basic(t *testing.T) {
 	var cr configservice.ConfigurationRecorder
 	var crs configservice.ConfigurationRecorderStatus
 	rInt := acctest.RandInt()
@@ -36,7 +36,7 @@ func TestAccAWSConfigConfigurationRecorderStatus_basic(t *testing.T) {
 	})
 }
 
-func TestAccAWSConfigConfigurationRecorderStatus_startEnabled(t *testing.T) {
+func testAccConfigConfigurationRecorderStatus_startEnabled(t *testing.T) {
 	var cr configservice.ConfigurationRecorder
 	var crs configservice.ConfigurationRecorderStatus
 	rInt := acctest.RandInt()
@@ -81,7 +81,7 @@ func TestAccAWSConfigConfigurationRecorderStatus_startEnabled(t *testing.T) {
 	})
 }
 
-func TestAccAWSConfigConfigurationRecorderStatus_importBasic(t *testing.T) {
+func testAccConfigConfigurationRecorderStatus_importBasic(t *testing.T) {
 	resourceName := "aws_config_configuration_recorder_status.foo"
 	rInt := acctest.RandInt()
diff --git a/builtin/providers/aws/resource_aws_config_configuration_recorder_test.go b/builtin/providers/aws/resource_aws_config_configuration_recorder_test.go
index 8a7363d18..8180f6694 100644
--- a/builtin/providers/aws/resource_aws_config_configuration_recorder_test.go
+++ b/builtin/providers/aws/resource_aws_config_configuration_recorder_test.go
@@ -12,7 +12,7 @@ import (
 	"github.com/hashicorp/terraform/terraform"
 )
 
-func TestAccAWSConfigConfigurationRecorder_basic(t *testing.T) {
+func testAccConfigConfigurationRecorder_basic(t *testing.T) {
 	var cr configservice.ConfigurationRecorder
 	rInt := acctest.RandInt()
 	expectedName := fmt.Sprintf("tf-acc-test-%d", rInt)
@@ -39,7 +39,7 @@ func TestAccAWSConfigConfigurationRecorder_basic(t *testing.T) {
 	})
 }
 
-func TestAccAWSConfigConfigurationRecorder_allParams(t *testing.T) {
+func testAccConfigConfigurationRecorder_allParams(t *testing.T) {
 	var cr configservice.ConfigurationRecorder
 	rInt := acctest.RandInt()
 	expectedName := fmt.Sprintf("tf-acc-test-%d", rInt)
@@ -70,7 +70,7 @@ func TestAccAWSConfigConfigurationRecorder_allParams(t *testing.T) {
 	})
 }
 
-func TestAccAWSConfigConfigurationRecorder_importBasic(t *testing.T) {
+func testAccConfigConfigurationRecorder_importBasic(t *testing.T) {
 	resourceName := "aws_config_configuration_recorder.foo"
 	rInt := acctest.RandInt()
diff --git a/builtin/providers/aws/resource_aws_config_delivery_channel_test.go b/builtin/providers/aws/resource_aws_config_delivery_channel_test.go
index 6f0d5fd4d..098465f9d 100644
--- a/builtin/providers/aws/resource_aws_config_delivery_channel_test.go
+++ b/builtin/providers/aws/resource_aws_config_delivery_channel_test.go
@@ -12,7 +12,7 @@ import (
 	"github.com/hashicorp/terraform/terraform"
 )
 
-func TestAccAWSConfigDeliveryChannel_basic(t *testing.T) {
+func testAccConfigDeliveryChannel_basic(t *testing.T) {
 	var dc configservice.DeliveryChannel
 	rInt := acctest.RandInt()
 	expectedName := fmt.Sprintf("tf-acc-test-awsconfig-%d", rInt)
@@ -36,7 +36,7 @@ func TestAccAWSConfigDeliveryChannel_basic(t *testing.T) {
 	})
 }
 
-func TestAccAWSConfigDeliveryChannel_allParams(t *testing.T) {
+func testAccConfigDeliveryChannel_allParams(t *testing.T) {
 	var dc configservice.DeliveryChannel
 	rInt := acctest.RandInt()
 	expectedName := fmt.Sprintf("tf-acc-test-awsconfig-%d", rInt)
@@ -64,7 +64,7 @@ func TestAccAWSConfigDeliveryChannel_allParams(t *testing.T) {
 	})
 }
 
-func TestAccAWSConfigDeliveryChannel_importBasic(t *testing.T) {
+func testAccConfigDeliveryChannel_importBasic(t *testing.T) {
 	resourceName := "aws_config_delivery_channel.foo"
 	rInt := acctest.RandInt()
diff --git a/builtin/providers/aws/resource_aws_config_test.go b/builtin/providers/aws/resource_aws_config_test.go
new file mode 100644
index 000000000..f24424624
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_config_test.go
@@ -0,0 +1,44 @@
+package aws
+
+import (
+	"testing"
+)
+
+func TestAccAWSConfig(t *testing.T) {
+	testCases := map[string]map[string]func(t *testing.T){
+		"Config": {
+			"basic":        testAccConfigConfigRule_basic,
+			"ownerAws":     testAccConfigConfigRule_ownerAws,
+			"customlambda": testAccConfigConfigRule_customlambda,
+			"importAws":    testAccConfigConfigRule_importAws,
+			"importLambda": testAccConfigConfigRule_importLambda,
+		},
+		"ConfigurationRecorderStatus": {
+			"basic":        testAccConfigConfigurationRecorderStatus_basic,
+			"startEnabled": testAccConfigConfigurationRecorderStatus_startEnabled,
+			"importBasic":  testAccConfigConfigurationRecorderStatus_importBasic,
+		},
+		"ConfigurationRecorder": {
+			"basic":       testAccConfigConfigurationRecorder_basic,
+			"allParams":   testAccConfigConfigurationRecorder_allParams,
+			"importBasic": testAccConfigConfigurationRecorder_importBasic,
+		},
+		"DeliveryChannel": {
+			"basic":       testAccConfigDeliveryChannel_basic,
+			"allParams":   testAccConfigDeliveryChannel_allParams,
+			"importBasic": testAccConfigDeliveryChannel_importBasic,
+		},
+	}
+
+	for group, m := range testCases {
+		m := m
+		t.Run(group, func(t *testing.T) {
+			for name, tc := range m {
+				tc := tc
+				t.Run(name, func(t *testing.T) {
+					tc(t)
+				})
+			}
+		})
+	}
+}

From a8b840458901cdab884fbf128ea6429c2c80528e Mon Sep 17 00:00:00 2001
From: ebilhoo
Date: Thu, 13 Apr 2017 19:41:14 +0000
Subject: [PATCH 192/342] rdpool doc

---
 .../providers/ultradns/r/rdpool.html.markdown | 46 +++++++++++++++
 1 file changed, 46 insertions(+)
 create mode 100644 website/source/docs/providers/ultradns/r/rdpool.html.markdown

diff --git a/website/source/docs/providers/ultradns/r/rdpool.html.markdown b/website/source/docs/providers/ultradns/r/rdpool.html.markdown
new file mode 100644
index 000000000..be7410fc1
--- /dev/null
+++ b/website/source/docs/providers/ultradns/r/rdpool.html.markdown
@@ -0,0 +1,46 @@
+---
+layout: "ultradns"
+page_title: "UltraDNS: ultradns_rdpool"
+sidebar_current: "docs-ultradns-resource-rdpool"
+description: |-
+  Provides an UltraDNS Resource Distribution pool resource.
+---
+
+# ultradns\_rdpool
+
+Provides an UltraDNS Resource Distribution (RD) pool resource, which is
+used to define rules for returning multiple A or AAAA records for a given owner name. Ordering can be FIXED, RANDOM or ROUND_ROBIN.
+
+## Example Usage
+```
+# Create a Resource Distribution pool
+
+resource "ultradns_rdpool" "pool" {
+  zone        = "${var.ultradns_domain}"
+  name        = "terraform-rdpool"
+  ttl         = 600
+  description = "Example RD Pool"
+  order       = "ROUND_ROBIN"
+  rdata       = [ "192.168.0.10", "192.168.0.11" ]
+}
+```
+
+## Argument Reference
+
+See [related part of UltraDNS Docs](https://restapi.ultradns.com/v1/docs#post-rrset) for details about valid values.
+
+The following arguments are supported:
+
+* `zone` - (Required) The domain to add the record to.
+* `name` - (Required) The name of the record.
+* `order` - (Required) Ordering rule, one of FIXED, RANDOM or ROUND_ROBIN.
+* `rdata` - (Required) List of IP addresses.
+* `description` - (Optional) Description of the Resource Distribution pool. Valid values are strings less than 256 characters.
+* `ttl` - (Optional) The TTL of the pool in seconds. Default: `3600`.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The record ID
+* `hostname` - The FQDN of the record

From 801e7b70e625f705f494fc4d8bda1322888e097b Mon Sep 17 00:00:00 2001
From: ebilhoo
Date: Thu, 13 Apr 2017 20:11:43 +0000
Subject: [PATCH 193/342] support for rdpool resource

---
 builtin/providers/ultradns/common_test.go     |  23 ++
 .../ultradns/resource_ultradns_rdpool.go      | 243 ++++++++++++++++++
 .../ultradns/resource_ultradns_rdpool_test.go | 100 +++++++
 3 files changed, 366 insertions(+)
 create mode 100644 builtin/providers/ultradns/resource_ultradns_rdpool.go
 create mode 100644 builtin/providers/ultradns/resource_ultradns_rdpool_test.go

diff --git a/builtin/providers/ultradns/common_test.go b/builtin/providers/ultradns/common_test.go
index 24470e0d3..05823fdcd 100644
--- a/builtin/providers/ultradns/common_test.go
+++ b/builtin/providers/ultradns/common_test.go
@@ -8,6 +8,29 @@ import (
 	"github.com/hashicorp/terraform/terraform"
 )
 
+func testAccRdpoolCheckDestroy(s *terraform.State) error {
+	client := testAccProvider.Meta().(*udnssdk.Client)
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "ultradns_rdpool" {
+			continue
+		}
+
+		k := udnssdk.RRSetKey{
+			Zone: rs.Primary.Attributes["zone"],
+			Name: rs.Primary.Attributes["name"],
+			Type: rs.Primary.Attributes["type"],
+		}
+
+		_, err := client.RRSets.Select(k)
+		if err == nil {
+			return fmt.Errorf("Record still exists")
+		}
+	}
+
+	return nil
+}
+
 func testAccTcpoolCheckDestroy(s *terraform.State) error {
 	client := testAccProvider.Meta().(*udnssdk.Client)
 
diff --git a/builtin/providers/ultradns/resource_ultradns_rdpool.go b/builtin/providers/ultradns/resource_ultradns_rdpool.go
new file mode 100644
index 000000000..e67b57219
--- /dev/null
+++ b/builtin/providers/ultradns/resource_ultradns_rdpool.go
@@ -0,0 +1,243 @@
+package ultradns
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/Ensighten/udnssdk"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceUltradnsRdpool() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceUltradnsRdpoolCreate,
+		Read:   resourceUltradnsRdpoolRead,
+		Update: resourceUltradnsRdpoolUpdate,
+		Delete: resourceUltradnsRdpoolDelete,
+
+		Schema: map[string]*schema.Schema{
+			// Required
+			"zone": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+ Required: true, + ForceNew: true, + }, + "order": &schema.Schema{ + Type: schema.TypeString, + Required: true, + // 0-255 char + // FIXED | RANDOM | ROUND_ROBIN + }, + "rdata": &schema.Schema{ + Type: schema.TypeSet, + Set: schema.HashString, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + // Optional + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + // 0-255 char + }, + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 3600, + }, + // Computed + "hostname": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +// CRUD Operations + +func resourceUltradnsRdpoolCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[INFO] ultradns_rdpool create") + client := meta.(*udnssdk.Client) + + r, err := newRRSetResourceFromRdpool(d) + if err != nil { + return err + } + + log.Printf("[INFO] ultradns_rdpool create: %#v", r) + _, err = client.RRSets.Create(r.RRSetKey(), r.RRSet()) + if err != nil { + return fmt.Errorf("create failed: %#v -> %v", r, err) + } + + d.SetId(r.ID()) + log.Printf("[INFO] ultradns_rdpool.id: %v", d.Id()) + + return resourceUltradnsRdpoolRead(d, meta) +} + +func resourceUltradnsRdpoolRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[INFO] ultradns_rdpool read") + client := meta.(*udnssdk.Client) + + rr, err := newRRSetResourceFromRdpool(d) + if err != nil { + return err + } + + rrsets, err := client.RRSets.Select(rr.RRSetKey()) + if err != nil { + uderr, ok := err.(*udnssdk.ErrorResponseList) + if ok { + for _, resps := range uderr.Responses { + // 70002 means Records Not Found + if resps.ErrorCode == 70002 { + d.SetId("") + return nil + } + return fmt.Errorf("resource not found: %v", err) + } + } + return fmt.Errorf("resource not found: %v", err) + } + + r := rrsets[0] + + zone := d.Get("zone") + // ttl + d.Set("ttl", r.TTL) + // hostname + if r.OwnerName == "" { + d.Set("hostname", zone) + } else { + if strings.HasSuffix(r.OwnerName, ".") { + d.Set("hostname", r.OwnerName) + } else { + d.Set("hostname", fmt.Sprintf("%s.%s", r.OwnerName, zone)) + } + } + + // And now... the Profile! 
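+	// The pool's ordering rule and description live in the RRSet profile
+	// rather than on the RRSet itself, so the profile must be present and
+	// unmarshalled before those values can be read back into state.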
+ if r.Profile == nil { + return fmt.Errorf("RRSet.profile missing: invalid RDPool schema in: %#v", r) + } + p, err := r.Profile.RDPoolProfile() + if err != nil { + return fmt.Errorf("RRSet.profile could not be unmarshalled: %v\n", err) + } + + // Set simple values + d.Set("description", p.Description) + d.Set("order", p.Order) + + // TODO: rigorously test this to see if we can remove the error handling + + //TODO + + //err = d.Set("rdata", makeSetFromStrings(r.RData)) + //err = d.Set("rdata", makeSetFromRdataAlone(r.RData)) + if err != nil { + return fmt.Errorf("rdata set failed: %#v", err) + } + return nil +} + +func resourceUltradnsRdpoolUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[INFO] ultradns_rdpool update") + client := meta.(*udnssdk.Client) + + r, err := newRRSetResourceFromRdpool(d) + if err != nil { + return err + } + + log.Printf("[INFO] ultradns_rdpool update: %+v", r) + _, err = client.RRSets.Update(r.RRSetKey(), r.RRSet()) + if err != nil { + return fmt.Errorf("resource update failed: %v", err) + } + + return resourceUltradnsRdpoolRead(d, meta) +} + +func resourceUltradnsRdpoolDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[INFO] ultradns_rdpool delete") + client := meta.(*udnssdk.Client) + + r, err := newRRSetResourceFromRdpool(d) + if err != nil { + return err + } + + log.Printf("[INFO] ultradns_rdpool delete: %+v", r) + _, err = client.RRSets.Delete(r.RRSetKey()) + if err != nil { + return fmt.Errorf("resource delete failed: %v", err) + } + + return nil +} + +// Resource Helpers + +func newRRSetResourceFromRdpool(d *schema.ResourceData) (rRSetResource, error) { + //rDataRaw := d.Get("rdata").(*schema.Set).List() + r := rRSetResource{ + // "The only valid rrtype value for SiteBacker or Traffic Controller pools is A" + // per https://portal.ultradns.com/static/docs/REST-API_User_Guide.pdf + RRType: "A", + Zone: d.Get("zone").(string), + OwnerName: d.Get("name").(string), + TTL: d.Get("ttl").(int), + //RData: unzipRdataHosts(rDataRaw), + } + if attr, ok := d.GetOk("rdata"); ok { + rdata := attr.(*schema.Set).List() + r.RData = make([]string, len(rdata)) + for i, j := range rdata { + r.RData[i] = j.(string) + } + } + + profile := udnssdk.RDPoolProfile{ + Context: udnssdk.RDPoolSchema, + Order: d.Get("order").(string), + Description: d.Get("description").(string), + } + + rp := profile.RawProfile() + r.Profile = rp + + return r, nil +} + +// zip RData into []map[string]interface{} +func zipRDataAlone(rds []string) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(rds)) + for _, rd := range rds { + r := map[string]interface{}{ + // "host": rds[i], + "host": rd, + } + result = append(result, r) + } + return result +} + +// makeSetFromRdatas encodes an array of Rdata into a +// *schema.Set in the appropriate structure for the schema +func makeSetFromRdataAlone(rds []string) *schema.Set { + s := &schema.Set{F: hashRdatas} + rs := zipRDataAlone(rds) + for _, r := range rs { + s.Add(r) + } + return s +} diff --git a/builtin/providers/ultradns/resource_ultradns_rdpool_test.go b/builtin/providers/ultradns/resource_ultradns_rdpool_test.go new file mode 100644 index 000000000..1ddd9c025 --- /dev/null +++ b/builtin/providers/ultradns/resource_ultradns_rdpool_test.go @@ -0,0 +1,100 @@ +package ultradns + +import ( + "fmt" + "testing" + + "github.com/Ensighten/udnssdk" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccUltradnsRdpool(t *testing.T) { + var record udnssdk.RRSet + domain := 
"ultradns.phinze.com" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccRdpoolCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf(testCfgRdpoolMinimal, domain), + Check: resource.ComposeTestCheckFunc( + testAccCheckUltradnsRecordExists("ultradns_rdpool.it", &record), + // Specified + resource.TestCheckResourceAttr("ultradns_rdpool.it", "zone", domain), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "name", "test-rdpool-minimal"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "ttl", "300"), + + // hashRdatas(): 10.6.0.1 -> 2847814707 + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2847814707.host", "10.6.0.1"), + // Defaults + resource.TestCheckResourceAttr("ultradns_rdpool.it", "description", "Minimal RD Pool"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2847814707.priority", "1"), + // Generated + resource.TestCheckResourceAttr("ultradns_rdpool.it", "id", "test-rdpool-minimal.ultradns.phinze.com"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "hostname", "test-rdpool-minimal.ultradns.phinze.com."), + ), + }, + resource.TestStep{ + Config: fmt.Sprintf(testCfgRdpoolMaximal, domain), + Check: resource.ComposeTestCheckFunc( + testAccCheckUltradnsRecordExists("ultradns_rdpool.it", &record), + // Specified + resource.TestCheckResourceAttr("ultradns_rdpool.it", "zone", domain), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "name", "test-rdpool-maximal"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "ttl", "300"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "description", "traffic controller pool with all settings tuned"), + + resource.TestCheckResourceAttr("ultradns_rdpool.it", "act_on_probes", "false"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "max_to_lb", "2"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "run_probes", "false"), + + // hashRdatas(): 10.6.1.1 -> 2826722820 + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2826722820.host", "10.6.1.1"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2826722820.priority", "1"), + + // hashRdatas(): 10.6.1.2 -> 829755326 + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.829755326.host", "10.6.1.2"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.829755326.priority", "2"), + + // Generated + resource.TestCheckResourceAttr("ultradns_rdpool.it", "id", "test-rdpool-maximal.ultradns.phinze.com"), + resource.TestCheckResourceAttr("ultradns_rdpool.it", "hostname", "test-rdpool-maximal.ultradns.phinze.com."), + ), + }, + }, + }) +} + +const testCfgRdpoolMinimal = ` +resource "ultradns_rdpool" "it" { + zone = "%s" + name = "test-rdpool-minimal" + ttl = 300 + description = "Minimal RD Pool" + + rdata { + host = "10.6.0.1" + } +} +` + +const testCfgRdpoolMaximal = ` +resource "ultradns_rdpool" "it" { + zone = "%s" + name = "test-rdpool-maximal" + order = "ROUND_ROBIN" + ttl = 300 + description = "traffic controller pool with all settings tuned" + rdata { + host = "10.6.1.1" + priority = 1 + } + + rdata { + host = "10.6.1.2" + priority = 2 + } +} +` From 04553c5d4ef737fd15ead8a0d412332f3f1d98a4 Mon Sep 17 00:00:00 2001 From: ebilhoo Date: Thu, 13 Apr 2017 20:19:08 +0000 Subject: [PATCH 194/342] add rdpool --- builtin/providers/ultradns/provider.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builtin/providers/ultradns/provider.go b/builtin/providers/ultradns/provider.go 
index e10015ab2..70f0dbed7 100644 --- a/builtin/providers/ultradns/provider.go +++ b/builtin/providers/ultradns/provider.go @@ -38,6 +38,7 @@ func Provider() terraform.ResourceProvider { "ultradns_probe_ping": resourceUltradnsProbePing(), "ultradns_record": resourceUltradnsRecord(), "ultradns_tcpool": resourceUltradnsTcpool(), + "ultradns_rdpool": resourceUltradnsRdpool(), }, ConfigureFunc: providerConfigure, From 37810872a07ac41bb427690c4acdd755ad7a2a95 Mon Sep 17 00:00:00 2001 From: ebilhoo Date: Sat, 15 Apr 2017 19:35:26 +0000 Subject: [PATCH 195/342] resourceUltradnsRdpoolRead set rdata --- builtin/providers/ultradns/resource_ultradns_rdpool.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/builtin/providers/ultradns/resource_ultradns_rdpool.go b/builtin/providers/ultradns/resource_ultradns_rdpool.go index e67b57219..a45ff6939 100644 --- a/builtin/providers/ultradns/resource_ultradns_rdpool.go +++ b/builtin/providers/ultradns/resource_ultradns_rdpool.go @@ -137,11 +137,7 @@ func resourceUltradnsRdpoolRead(d *schema.ResourceData, meta interface{}) error d.Set("description", p.Description) d.Set("order", p.Order) - // TODO: rigorously test this to see if we can remove the error handling - - //TODO - - //err = d.Set("rdata", makeSetFromStrings(r.RData)) + err = d.Set("rdata", makeSetFromStrings(r.RData)) //err = d.Set("rdata", makeSetFromRdataAlone(r.RData)) if err != nil { return fmt.Errorf("rdata set failed: %#v", err) From b6866f2187a9b391cc5102f4f81b1fc73004ae9a Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 13 Apr 2017 15:26:51 +0100 Subject: [PATCH 196/342] Locking the NSG to only operate on one resource at a time in the create --- .../azurerm/resource_arm_virtual_network.go | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/builtin/providers/azurerm/resource_arm_virtual_network.go b/builtin/providers/azurerm/resource_arm_virtual_network.go index 5d4ba9a29..9f6172036 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_network.go +++ b/builtin/providers/azurerm/resource_arm_virtual_network.go @@ -97,6 +97,22 @@ func resourceArmVirtualNetworkCreate(d *schema.ResourceData, meta interface{}) e Tags: expandTags(tags), } + networkSecurityGroupNames := make([]string, 0) + for _, subnet := range *vnet.VirtualNetworkPropertiesFormat.Subnets { + if subnet.NetworkSecurityGroup != nil { + subnetId := *subnet.NetworkSecurityGroup.ID + id, err := parseAzureResourceID(subnetId) + if err != nil { + return fmt.Errorf("[ERROR] Unable to Parse Network Security Group ID '%s': %+v", subnetId, err) + } + nsgName := id.Path["networkSecurityGroups"] + networkSecurityGroupNames = append(networkSecurityGroupNames, nsgName) + } + } + + azureRMVirtualNetworkLockNetworkSecurityGroups(&networkSecurityGroupNames) + defer azureRMVirtualNetworkUnlockNetworkSecurityGroups(&networkSecurityGroupNames) + _, err := vnetClient.CreateOrUpdate(resGroup, name, vnet, make(chan struct{})) if err != nil { return err @@ -182,6 +198,8 @@ func resourceArmVirtualNetworkDelete(d *schema.ResourceData, meta interface{}) e resGroup := id.ResourceGroup name := id.Path["virtualNetworks"] + // TODO: lock any associated NSG's + _, err = vnetClient.Delete(resGroup, name, make(chan struct{})) return err @@ -245,3 +263,14 @@ func resourceAzureSubnetHash(v interface{}) int { } return hashcode.String(subnet) } + +func azureRMVirtualNetworkUnlockNetworkSecurityGroups(networkSecurityGroupNames *[]string) { + for _, networkSecurityGroupName := range *networkSecurityGroupNames { + 
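+		// armMutexKV is keyed by NSG name, so every resource that references
+		// the same Network Security Group serialises its create, update and
+		// delete calls rather than racing against the Azure API.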
armMutexKV.Unlock(networkSecurityGroupName) + } +} +func azureRMVirtualNetworkLockNetworkSecurityGroups(networkSecurityGroupNames *[]string) { + for _, networkSecurityGroupName := range *networkSecurityGroupNames { + armMutexKV.Lock(networkSecurityGroupName) + } +} \ No newline at end of file From b93e6e3af75d8a4e70ed091b44faa9e7a589b514 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 13 Apr 2017 15:28:13 +0100 Subject: [PATCH 197/342] Locking on the delete too --- .../azurerm/resource_arm_virtual_network.go | 47 +++++++++++++++++-- 1 file changed, 42 insertions(+), 5 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_network.go b/builtin/providers/azurerm/resource_arm_virtual_network.go index 9f6172036..e8ad55c97 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_network.go +++ b/builtin/providers/azurerm/resource_arm_virtual_network.go @@ -100,12 +100,11 @@ func resourceArmVirtualNetworkCreate(d *schema.ResourceData, meta interface{}) e networkSecurityGroupNames := make([]string, 0) for _, subnet := range *vnet.VirtualNetworkPropertiesFormat.Subnets { if subnet.NetworkSecurityGroup != nil { - subnetId := *subnet.NetworkSecurityGroup.ID - id, err := parseAzureResourceID(subnetId) + nsgName, err := parseNetworkSecurityGroupName(*subnet.NetworkSecurityGroup.ID) if err != nil { - return fmt.Errorf("[ERROR] Unable to Parse Network Security Group ID '%s': %+v", subnetId, err) + return err } - nsgName := id.Path["networkSecurityGroups"] + networkSecurityGroupNames = append(networkSecurityGroupNames, nsgName) } } @@ -198,7 +197,13 @@ func resourceArmVirtualNetworkDelete(d *schema.ResourceData, meta interface{}) e resGroup := id.ResourceGroup name := id.Path["virtualNetworks"] - // TODO: lock any associated NSG's + nsgNames, err := expandAzureRmVirtualNetworkVirtualNetworkSecurityGroupNames(d) + if err != nil { + return fmt.Errorf("[ERROR] Error parsing Network Security Group ID's: %+v", err) + } + + azureRMVirtualNetworkLockNetworkSecurityGroups(&nsgNames) + defer azureRMVirtualNetworkUnlockNetworkSecurityGroups(&nsgNames) _, err = vnetClient.Delete(resGroup, name, make(chan struct{})) @@ -264,6 +269,38 @@ func resourceAzureSubnetHash(v interface{}) int { return hashcode.String(subnet) } +func parseNetworkSecurityGroupName(networkSecurityGroupId string) (string, error) { + id, err := parseAzureResourceID(networkSecurityGroupId) + if err != nil { + return "", fmt.Errorf("[ERROR] Unable to Parse Network Security Group ID '%s': %+v", networkSecurityGroupId, err) + } + + return id.Path["networkSecurityGroups"], nil +} + +func expandAzureRmVirtualNetworkVirtualNetworkSecurityGroupNames(d *schema.ResourceData) ([]string, error) { + nsgNames := make([]string, 0) + + if v, ok := d.GetOk("subnet"); ok { + subnets := v.(*schema.Set).List() + for _, subnet := range subnets { + subnet := subnet.(map[string]interface{}) + + networkSecurityGroupId := subnet["security_group"].(string) + if networkSecurityGroupId != "" { + nsgName, err := parseNetworkSecurityGroupName(networkSecurityGroupId) + if err != nil { + return nil, err + } + + nsgNames = append(nsgNames, nsgName) + } + } + } + + return nsgNames, nil +} + func azureRMVirtualNetworkUnlockNetworkSecurityGroups(networkSecurityGroupNames *[]string) { for _, networkSecurityGroupName := range *networkSecurityGroupNames { armMutexKV.Unlock(networkSecurityGroupName) From 3ecb0f4fc4e6d4a553e7f5441b9bbf0e1b6ba49e Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 13 Apr 2017 16:28:48 +0100 Subject: [PATCH 198/342] 
Locking on the NSG ID --- .../resource_arm_network_interface_card.go | 19 +++++++++++++++++++ .../providers/azurerm/resource_arm_subnet.go | 19 +++++++++++++++++++ .../azurerm/resource_arm_virtual_network.go | 9 --------- builtin/providers/azurerm/resourceid.go | 9 +++++++++ 4 files changed, 47 insertions(+), 9 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_network_interface_card.go b/builtin/providers/azurerm/resource_arm_network_interface_card.go index 7450519b1..3c74bc3e0 100644 --- a/builtin/providers/azurerm/resource_arm_network_interface_card.go +++ b/builtin/providers/azurerm/resource_arm_network_interface_card.go @@ -172,6 +172,14 @@ func resourceArmNetworkInterfaceCreate(d *schema.ResourceData, meta interface{}) properties.NetworkSecurityGroup = &network.SecurityGroup{ ID: &nsgId, } + + networkSecurityGroupName, err := parseNetworkSecurityGroupName(nsgId) + if err != nil { + return err + } + + armMutexKV.Lock(networkSecurityGroupName) + defer armMutexKV.Unlock(networkSecurityGroupName) } dns, hasDns := d.GetOk("dns_servers") @@ -308,6 +316,17 @@ func resourceArmNetworkInterfaceDelete(d *schema.ResourceData, meta interface{}) resGroup := id.ResourceGroup name := id.Path["networkInterfaces"] + if v, ok := d.GetOk("network_security_group_id"); ok { + networkSecurityGroupId := v.(string) + networkSecurityGroupName, err := parseNetworkSecurityGroupName(networkSecurityGroupId) + if err != nil { + return err + } + + armMutexKV.Lock(networkSecurityGroupName) + defer armMutexKV.Unlock(networkSecurityGroupName) + } + _, err = ifaceClient.Delete(resGroup, name, make(chan struct{})) return err diff --git a/builtin/providers/azurerm/resource_arm_subnet.go b/builtin/providers/azurerm/resource_arm_subnet.go index c5329b9f8..65df4f447 100644 --- a/builtin/providers/azurerm/resource_arm_subnet.go +++ b/builtin/providers/azurerm/resource_arm_subnet.go @@ -89,6 +89,14 @@ func resourceArmSubnetCreate(d *schema.ResourceData, meta interface{}) error { properties.NetworkSecurityGroup = &network.SecurityGroup{ ID: &nsgId, } + + networkSecurityGroupName, err := parseNetworkSecurityGroupName(nsgId) + if err != nil { + return err + } + + armMutexKV.Lock(networkSecurityGroupName) + defer armMutexKV.Unlock(networkSecurityGroupName) } if v, ok := d.GetOk("route_table_id"); ok { @@ -182,6 +190,17 @@ func resourceArmSubnetDelete(d *schema.ResourceData, meta interface{}) error { name := id.Path["subnets"] vnetName := id.Path["virtualNetworks"] + if v, ok := d.GetOk("network_security_group_id"); ok { + networkSecurityGroupId := v.(string) + networkSecurityGroupName, err := parseNetworkSecurityGroupName(networkSecurityGroupId) + if err != nil { + return err + } + + armMutexKV.Lock(networkSecurityGroupName) + defer armMutexKV.Unlock(networkSecurityGroupName) + } + armMutexKV.Lock(vnetName) defer armMutexKV.Unlock(vnetName) diff --git a/builtin/providers/azurerm/resource_arm_virtual_network.go b/builtin/providers/azurerm/resource_arm_virtual_network.go index e8ad55c97..6be08d9f1 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_network.go +++ b/builtin/providers/azurerm/resource_arm_virtual_network.go @@ -269,15 +269,6 @@ func resourceAzureSubnetHash(v interface{}) int { return hashcode.String(subnet) } -func parseNetworkSecurityGroupName(networkSecurityGroupId string) (string, error) { - id, err := parseAzureResourceID(networkSecurityGroupId) - if err != nil { - return "", fmt.Errorf("[ERROR] Unable to Parse Network Security Group ID '%s': %+v", networkSecurityGroupId, err) - } - - return 
id.Path["networkSecurityGroups"], nil -} - func expandAzureRmVirtualNetworkVirtualNetworkSecurityGroupNames(d *schema.ResourceData) ([]string, error) { nsgNames := make([]string, 0) diff --git a/builtin/providers/azurerm/resourceid.go b/builtin/providers/azurerm/resourceid.go index b05f4d75f..5a0a19a8a 100644 --- a/builtin/providers/azurerm/resourceid.go +++ b/builtin/providers/azurerm/resourceid.go @@ -95,3 +95,12 @@ func parseAzureResourceID(id string) (*ResourceID, error) { return idObj, nil } + +func parseNetworkSecurityGroupName(networkSecurityGroupId string) (string, error) { + id, err := parseAzureResourceID(networkSecurityGroupId) + if err != nil { + return "", fmt.Errorf("[ERROR] Unable to Parse Network Security Group ID '%s': %+v", networkSecurityGroupId, err) + } + + return id.Path["networkSecurityGroups"], nil +} \ No newline at end of file From fd03d0310e8f69a72b35694e170a4282bfaf27bd Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 13 Apr 2017 17:13:41 +0100 Subject: [PATCH 199/342] Formatting --- builtin/providers/azurerm/resource_arm_virtual_network.go | 2 +- builtin/providers/azurerm/resourceid.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_network.go b/builtin/providers/azurerm/resource_arm_virtual_network.go index 6be08d9f1..e4f38e09d 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_network.go +++ b/builtin/providers/azurerm/resource_arm_virtual_network.go @@ -301,4 +301,4 @@ func azureRMVirtualNetworkLockNetworkSecurityGroups(networkSecurityGroupNames *[ for _, networkSecurityGroupName := range *networkSecurityGroupNames { armMutexKV.Lock(networkSecurityGroupName) } -} \ No newline at end of file +} diff --git a/builtin/providers/azurerm/resourceid.go b/builtin/providers/azurerm/resourceid.go index 5a0a19a8a..f981b410b 100644 --- a/builtin/providers/azurerm/resourceid.go +++ b/builtin/providers/azurerm/resourceid.go @@ -103,4 +103,4 @@ func parseNetworkSecurityGroupName(networkSecurityGroupId string) (string, error } return id.Path["networkSecurityGroups"], nil -} \ No newline at end of file +} From 079043d5f83f1b253215af0c013c9157c1fa9e76 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 18 Apr 2017 16:20:29 +0100 Subject: [PATCH 200/342] Checking the type of the subnet before using it --- builtin/providers/azurerm/resource_arm_virtual_network.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_network.go b/builtin/providers/azurerm/resource_arm_virtual_network.go index e4f38e09d..217323a6f 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_network.go +++ b/builtin/providers/azurerm/resource_arm_virtual_network.go @@ -275,7 +275,10 @@ func expandAzureRmVirtualNetworkVirtualNetworkSecurityGroupNames(d *schema.Resou if v, ok := d.GetOk("subnet"); ok { subnets := v.(*schema.Set).List() for _, subnet := range subnets { - subnet := subnet.(map[string]interface{}) + subnet, ok := subnet.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("[ERROR] Subnet should be a Hash - was '%+v'", subnet) + } networkSecurityGroupId := subnet["security_group"].(string) if networkSecurityGroupId != "" { From 3e8dca19a9b1f71fae6f76c455f1b856ba801268 Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Tue, 18 Apr 2017 17:37:10 +0100 Subject: [PATCH 201/342] Updating the changelog to include #13637 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
af1a69ed6..0e036f20d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ BUG FIXES:
  * provider/aws: Remove aws_network_acl_rule if not found [GH-13608]
  * provider/aws: Allow GovCloud KMS ARNs to pass validation in `kms_key_id` attributes [GH-13699]
  * provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650]
+ * provider/azurerm: Locking around Network Security Group / Subnets [GH-13637]
  * provider/google: Stop setting the id when project creation fails [GH-13644]
  * provider/newrelic: newrelic_alert_condition - `condition_scope` must be `application` or `instance` [GH-12972]
  * provider/openstack: Fix updating Ports [GH-13604]

From bcacf3a036a675ad58ec32c58c48657429df8566 Mon Sep 17 00:00:00 2001
From: Seth Vargo
Date: Tue, 18 Apr 2017 14:10:27 -0400
Subject: [PATCH 202/342] Update ldlogo

---
 website/source/assets/images/logo-text.svg | 12 +++++-------
 website/source/layouts/layout.erb | 2 +-
 2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/website/source/assets/images/logo-text.svg b/website/source/assets/images/logo-text.svg
index 5ff7645c2..67c40535c 100644
--- a/website/source/assets/images/logo-text.svg
+++ b/website/source/assets/images/logo-text.svg
@@ -1,9 +1,7 @@
 [SVG path data elided]
diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb
index d4fea022d..b3a6e1e7b 100644
--- a/website/source/layouts/layout.erb
+++ b/website/source/layouts/layout.erb
@@ -136,7 +136,7 @@
       "alternateName": "Terraform by HashiCorp",
       "manufacturer": "HashiCorp",
       "url": "https://www.terraform.io",
-      "logo": "<%= File.join(base_url, image_path("logo-text.svg")) %>",
+      "logo": "<%= File.join(base_url, image_path("logo-hashicorp.svg")) %>",
       "sameAs": [
         "https://github.com/hashicorp/terraform"
       ]

From 507917df43fac5ce25185a2437ad5b5c2e40424f Mon Sep 17 00:00:00 2001
From: Joseph Herlant
Date: Tue, 18 Apr 2017 11:30:54 -0700
Subject: [PATCH 203/342] provider/fastly: upgrade go-fastly to fix #12910 (#13648)

* Adding acceptance tests to reproduce issue #12910
* Upgrade go-fastly and its dependencies and move the Version to int as changed upstream
---
 .../fastly/resource_fastly_service_v1.go | 44 +++++-----
 .../resource_fastly_service_v1_gzip_test.go | 6 +-
 ...resource_fastly_service_v1_headers_test.go | 8 +-
 ...urce_fastly_service_v1_healthcheck_test.go | 4 +-
 ...ource_fastly_service_v1_papertrail_test.go | 4 +-
 ..._fastly_service_v1_response_object_test.go | 4 +-
 ...source_fastly_service_v1_s3logging_test.go | 8 +-
 .../fastly/resource_fastly_service_v1_test.go | 54 +++++++++++-
 vendor/github.com/ajg/form/README.md | 5 ++
 vendor/github.com/ajg/form/TODO.md | 1 -
 vendor/github.com/ajg/form/decode.go | 50 +++++------
 vendor/github.com/ajg/form/encode.go | 71 ++++++++--------
 .../github.com/sethvargo/go-fastly/Makefile | 82 ++++++++++++-------
 .../github.com/sethvargo/go-fastly/backend.go | 32 ++++----
 .../sethvargo/go-fastly/cache_setting.go | 32 ++++----
 .../github.com/sethvargo/go-fastly/client.go | 24 +++---
 .../sethvargo/go-fastly/condition.go | 32 ++++----
 .../sethvargo/go-fastly/dictionary.go | 32 ++++----
 vendor/github.com/sethvargo/go-fastly/diff.go | 14 ++--
 .../sethvargo/go-fastly/director.go | 32 ++++----
 .../sethvargo/go-fastly/director_backend.go | 20 ++---
 .../github.com/sethvargo/go-fastly/domain.go | 32 ++++----
 vendor/github.com/sethvargo/go-fastly/ftp.go | 32 ++++----
 vendor/github.com/sethvargo/go-fastly/gcs.go | 32 ++++----
 vendor/github.com/sethvargo/go-fastly/gzip.go | 32 ++++----
.../github.com/sethvargo/go-fastly/header.go | 32 ++++---- .../sethvargo/go-fastly/health_check.go | 32 ++++---- .../sethvargo/go-fastly/logentries.go | 32 ++++---- .../sethvargo/go-fastly/papertrail.go | 32 ++++---- .../sethvargo/go-fastly/request_setting.go | 32 ++++---- .../sethvargo/go-fastly/response_object.go | 32 ++++---- vendor/github.com/sethvargo/go-fastly/s3.go | 32 ++++---- .../sethvargo/go-fastly/settings.go | 16 ++-- .../sethvargo/go-fastly/sumologic.go | 32 ++++---- .../github.com/sethvargo/go-fastly/syslog.go | 32 ++++---- vendor/github.com/sethvargo/go-fastly/vcl.go | 44 +++++----- .../github.com/sethvargo/go-fastly/version.go | 58 ++++++------- .../sethvargo/go-fastly/wordpress.go | 32 ++++---- vendor/gopkg.in/yaml.v2/decode.go | 1 - vendor/gopkg.in/yaml.v2/emitterc.go | 1 - vendor/gopkg.in/yaml.v2/parserc.go | 1 - vendor/gopkg.in/yaml.v2/resolve.go | 11 ++- vendor/gopkg.in/yaml.v2/scannerc.go | 2 +- vendor/vendor.json | 18 ++-- 44 files changed, 621 insertions(+), 538 deletions(-) diff --git a/builtin/providers/fastly/resource_fastly_service_v1.go b/builtin/providers/fastly/resource_fastly_service_v1.go index e3b4a7c7b..db9f6df2d 100644 --- a/builtin/providers/fastly/resource_fastly_service_v1.go +++ b/builtin/providers/fastly/resource_fastly_service_v1.go @@ -37,7 +37,7 @@ func resourceServiceV1() *schema.Resource { // creating and activating. It's used internally, but also exported for // users to see. "active_version": { - Type: schema.TypeString, + Type: schema.TypeInt, Computed: true, }, @@ -866,14 +866,14 @@ func resourceServiceV1Update(d *schema.ResourceData, meta interface{}) error { } if needsChange { - latestVersion := d.Get("active_version").(string) - if latestVersion == "" { + latestVersion := d.Get("active_version").(int) + if latestVersion == 0 { // If the service was just created, there is an empty Version 1 available // that is unlocked and can be updated - latestVersion = "1" + latestVersion = 1 } else { // Clone the latest version, giving us an unlocked version we can modify - log.Printf("[DEBUG] Creating clone of version (%s) for updates", latestVersion) + log.Printf("[DEBUG] Creating clone of version (%d) for updates", latestVersion) newVersion, err := conn.CloneVersion(&gofastly.CloneVersionInput{ Service: d.Id(), Version: latestVersion, @@ -1684,7 +1684,7 @@ func resourceServiceV1Update(d *schema.ResourceData, meta interface{}) error { Version: latestVersion, }) if err != nil { - return fmt.Errorf("[ERR] Error activating version (%s): %s", latestVersion, err) + return fmt.Errorf("[ERR] Error activating version (%d): %s", latestVersion, err) } // Only if the version is valid and activated do we set the active_version. 
@@ -1726,7 +1726,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { // If CreateService succeeds, but initial updates to the Service fail, we'll // have an empty ActiveService version (no version is active, so we can't // query for information on it) - if s.ActiveVersion.Number != "" { + if s.ActiveVersion.Number != 0 { settingsOpts := gofastly.GetSettingsInput{ Service: d.Id(), Version: s.ActiveVersion.Number, @@ -1735,7 +1735,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { d.Set("default_host", settings.DefaultHost) d.Set("default_ttl", settings.DefaultTTL) } else { - return fmt.Errorf("[ERR] Error looking up Version settings for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Version settings for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } // TODO: update go-fastly to support an ActiveVersion struct, which contains @@ -1748,7 +1748,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Domains for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Domains for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } // Refresh Domains @@ -1766,7 +1766,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Backends for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Backends for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } bl := flattenBackends(backendList) @@ -1783,7 +1783,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Headers for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Headers for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } hl := flattenHeaders(headerList) @@ -1800,7 +1800,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Gzips for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Gzips for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } gl := flattenGzips(gzipsList) @@ -1817,7 +1817,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Healthcheck for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Healthcheck for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } hcl := flattenHealthchecks(healthcheckList) @@ -1834,7 +1834,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up S3 Logging for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up S3 Logging for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } sl := flattenS3s(s3List) @@ -1851,7 +1851,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Papertrail for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return 
fmt.Errorf("[ERR] Error looking up Papertrail for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } pl := flattenPapertrails(papertrailList) @@ -1868,7 +1868,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Sumologic for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Sumologic for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } sul := flattenSumologics(sumologicList) @@ -1884,7 +1884,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Response Object for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Response Object for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } rol := flattenResponseObjects(responseObjectList) @@ -1901,7 +1901,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Conditions for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Conditions for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } cl := flattenConditions(conditionList) @@ -1918,7 +1918,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Request Settings for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Request Settings for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } rl := flattenRequestSettings(rsList) @@ -1934,7 +1934,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { Version: s.ActiveVersion.Number, }) if err != nil { - return fmt.Errorf("[ERR] Error looking up VCLs for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up VCLs for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } vl := flattenVCLs(vclList) @@ -1950,7 +1950,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { Version: s.ActiveVersion.Number, }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Cache Settings for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Cache Settings for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) } csl := flattenCacheSettings(cslList) @@ -1981,7 +1981,7 @@ func resourceServiceV1Delete(d *schema.ResourceData, meta interface{}) error { return err } - if s.ActiveVersion.Number != "" { + if s.ActiveVersion.Number != 0 { _, err := conn.DeactivateVersion(&gofastly.DeactivateVersionInput{ Service: d.Id(), Version: s.ActiveVersion.Number, diff --git a/builtin/providers/fastly/resource_fastly_service_v1_gzip_test.go b/builtin/providers/fastly/resource_fastly_service_v1_gzip_test.go index 8c4e2be5a..c1e9db762 100644 --- a/builtin/providers/fastly/resource_fastly_service_v1_gzip_test.go +++ b/builtin/providers/fastly/resource_fastly_service_v1_gzip_test.go @@ -108,20 +108,20 @@ func TestAccFastlyServiceV1_gzips_basic(t *testing.T) { domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) log1 := gofastly.Gzip{ - Version: "1", + Version: 1, Name: "gzip file types", Extensions: "js css", CacheCondition: "testing_condition", } 
log2 := gofastly.Gzip{ - Version: "1", + Version: 1, Name: "gzip extensions", ContentTypes: "text/css text/html", } log3 := gofastly.Gzip{ - Version: "1", + Version: 1, Name: "all", Extensions: "js html css", ContentTypes: "text/javascript application/x-javascript application/javascript text/css text/html", diff --git a/builtin/providers/fastly/resource_fastly_service_v1_headers_test.go b/builtin/providers/fastly/resource_fastly_service_v1_headers_test.go index 3ad1e2879..1715cee8b 100644 --- a/builtin/providers/fastly/resource_fastly_service_v1_headers_test.go +++ b/builtin/providers/fastly/resource_fastly_service_v1_headers_test.go @@ -81,7 +81,7 @@ func TestAccFastlyServiceV1_headers_basic(t *testing.T) { domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) log1 := gofastly.Header{ - Version: "1", + Version: 1, Name: "remove x-amz-request-id", Destination: "http.x-amz-request-id", Type: "cache", @@ -90,7 +90,7 @@ func TestAccFastlyServiceV1_headers_basic(t *testing.T) { } log2 := gofastly.Header{ - Version: "1", + Version: 1, Name: "remove s3 server", Destination: "http.Server", Type: "cache", @@ -100,7 +100,7 @@ func TestAccFastlyServiceV1_headers_basic(t *testing.T) { } log3 := gofastly.Header{ - Version: "1", + Version: 1, Name: "DESTROY S3", Destination: "http.Server", Type: "cache", @@ -109,7 +109,7 @@ func TestAccFastlyServiceV1_headers_basic(t *testing.T) { } log4 := gofastly.Header{ - Version: "1", + Version: 1, Name: "Add server name", Destination: "http.server-name", Type: "request", diff --git a/builtin/providers/fastly/resource_fastly_service_v1_healthcheck_test.go b/builtin/providers/fastly/resource_fastly_service_v1_healthcheck_test.go index 04ee8dd25..e78bd0505 100644 --- a/builtin/providers/fastly/resource_fastly_service_v1_healthcheck_test.go +++ b/builtin/providers/fastly/resource_fastly_service_v1_healthcheck_test.go @@ -17,7 +17,7 @@ func TestAccFastlyServiceV1_healthcheck_basic(t *testing.T) { domainName := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) log1 := gofastly.HealthCheck{ - Version: "1", + Version: 1, Name: "example-healthcheck1", Host: "example1.com", Path: "/test1.txt", @@ -32,7 +32,7 @@ func TestAccFastlyServiceV1_healthcheck_basic(t *testing.T) { } log2 := gofastly.HealthCheck{ - Version: "1", + Version: 1, Name: "example-healthcheck2", Host: "example2.com", Path: "/test2.txt", diff --git a/builtin/providers/fastly/resource_fastly_service_v1_papertrail_test.go b/builtin/providers/fastly/resource_fastly_service_v1_papertrail_test.go index 69500f258..f68fbd68f 100644 --- a/builtin/providers/fastly/resource_fastly_service_v1_papertrail_test.go +++ b/builtin/providers/fastly/resource_fastly_service_v1_papertrail_test.go @@ -17,7 +17,7 @@ func TestAccFastlyServiceV1_papertrail_basic(t *testing.T) { domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) log1 := gofastly.Papertrail{ - Version: "1", + Version: 1, Name: "papertrailtesting", Address: "test1.papertrailapp.com", Port: uint(3600), @@ -26,7 +26,7 @@ func TestAccFastlyServiceV1_papertrail_basic(t *testing.T) { } log2 := gofastly.Papertrail{ - Version: "1", + Version: 1, Name: "papertrailtesting2", Address: "test2.papertrailapp.com", Port: uint(8080), diff --git a/builtin/providers/fastly/resource_fastly_service_v1_response_object_test.go b/builtin/providers/fastly/resource_fastly_service_v1_response_object_test.go index 330cde8d8..5399e70a3 100644 --- a/builtin/providers/fastly/resource_fastly_service_v1_response_object_test.go +++ 
b/builtin/providers/fastly/resource_fastly_service_v1_response_object_test.go @@ -17,7 +17,7 @@ func TestAccFastlyServiceV1_response_object_basic(t *testing.T) { domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) log1 := gofastly.ResponseObject{ - Version: "1", + Version: 1, Name: "responseObjecttesting", Status: 200, Response: "OK", @@ -28,7 +28,7 @@ func TestAccFastlyServiceV1_response_object_basic(t *testing.T) { } log2 := gofastly.ResponseObject{ - Version: "1", + Version: 1, Name: "responseObjecttesting2", Status: 404, Response: "Not Found", diff --git a/builtin/providers/fastly/resource_fastly_service_v1_s3logging_test.go b/builtin/providers/fastly/resource_fastly_service_v1_s3logging_test.go index 68a91983f..e85c88755 100644 --- a/builtin/providers/fastly/resource_fastly_service_v1_s3logging_test.go +++ b/builtin/providers/fastly/resource_fastly_service_v1_s3logging_test.go @@ -18,7 +18,7 @@ func TestAccFastlyServiceV1_s3logging_basic(t *testing.T) { domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) log1 := gofastly.S3{ - Version: "1", + Version: 1, Name: "somebucketlog", BucketName: "fastlytestlogging", Domain: "s3-us-west-2.amazonaws.com", @@ -33,7 +33,7 @@ func TestAccFastlyServiceV1_s3logging_basic(t *testing.T) { } log2 := gofastly.S3{ - Version: "1", + Version: 1, Name: "someotherbucketlog", BucketName: "fastlytestlogging2", Domain: "s3-us-west-2.amazonaws.com", @@ -89,7 +89,7 @@ func TestAccFastlyServiceV1_s3logging_s3_env(t *testing.T) { defer resetEnv() log3 := gofastly.S3{ - Version: "1", + Version: 1, Name: "somebucketlog", BucketName: "fastlytestlogging", Domain: "s3-us-west-2.amazonaws.com", @@ -128,7 +128,7 @@ func TestAccFastlyServiceV1_s3logging_formatVersion(t *testing.T) { domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) log1 := gofastly.S3{ - Version: "1", + Version: 1, Name: "somebucketlog", BucketName: "fastlytestlogging", Domain: "s3-us-west-2.amazonaws.com", diff --git a/builtin/providers/fastly/resource_fastly_service_v1_test.go b/builtin/providers/fastly/resource_fastly_service_v1_test.go index ba6ca5592..93c03799a 100644 --- a/builtin/providers/fastly/resource_fastly_service_v1_test.go +++ b/builtin/providers/fastly/resource_fastly_service_v1_test.go @@ -173,7 +173,7 @@ func TestAccFastlyServiceV1_updateBackend(t *testing.T) { }, resource.TestStep{ - Config: testAccServiceV1Config_backend_update(name, backendName, backendName2), + Config: testAccServiceV1Config_backend_update(name, backendName, backendName2, 3400), Check: resource.ComposeTestCheckFunc( testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), testAccCheckFastlyServiceV1Attributes_backends(&service, name, []string{backendName, backendName2}), @@ -357,6 +357,52 @@ func testAccCheckFastlyServiceV1Attributes_backends(service *gofastly.ServiceDet } } +func TestAccFastlyServiceV1_defaultTTL(t *testing.T) { + var service gofastly.ServiceDetail + name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + backendName := fmt.Sprintf("%s.aws.amazon.com", acctest.RandString(3)) + backendName2 := fmt.Sprintf("%s.aws.amazon.com", acctest.RandString(3)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckServiceV1Destroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccServiceV1Config_backend(name, backendName), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), + 
testAccCheckFastlyServiceV1Attributes_backends(&service, name, []string{backendName}), + ), + }, + + resource.TestStep{ + Config: testAccServiceV1Config_backend_update(name, backendName, backendName2, 3400), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), + testAccCheckFastlyServiceV1Attributes_backends(&service, name, []string{backendName, backendName2}), + resource.TestCheckResourceAttr( + "fastly_service_v1.foo", "default_ttl", "3400"), + resource.TestCheckResourceAttr( + "fastly_service_v1.foo", "active_version", "2"), + ), + }, + // Now update the default_ttl to 0 and encounter the issue https://github.com/hashicorp/terraform/issues/12910 + resource.TestStep{ + Config: testAccServiceV1Config_backend_update(name, backendName, backendName2, 0), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), + testAccCheckFastlyServiceV1Attributes_backends(&service, name, []string{backendName, backendName2}), + resource.TestCheckResourceAttr( + "fastly_service_v1.foo", "default_ttl", "0"), + resource.TestCheckResourceAttr( + "fastly_service_v1.foo", "active_version", "3"), + ), + }, + }, + }) +} + func testAccCheckServiceV1Destroy(s *terraform.State) error { for _, rs := range s.RootModule().Resources { if rs.Type != "fastly_service_v1" { @@ -441,12 +487,12 @@ resource "fastly_service_v1" "foo" { }`, name, backend) } -func testAccServiceV1Config_backend_update(name, backend, backend2 string) string { +func testAccServiceV1Config_backend_update(name, backend, backend2 string, ttl uint) string { return fmt.Sprintf(` resource "fastly_service_v1" "foo" { name = "%s" - default_ttl = 3400 + default_ttl = %d domain { name = "test.notadomain.com" @@ -464,5 +510,5 @@ resource "fastly_service_v1" "foo" { } force_destroy = true -}`, name, backend, backend2) +}`, name, ttl, backend, backend2) } diff --git a/vendor/github.com/ajg/form/README.md b/vendor/github.com/ajg/form/README.md index 7117f4812..ad99be4b1 100644 --- a/vendor/github.com/ajg/form/README.md +++ b/vendor/github.com/ajg/form/README.md @@ -28,6 +28,7 @@ Usage ```go import "github.com/ajg/form" +// or: "gopkg.in/ajg/form.v1" ``` Given a type like the following... @@ -126,6 +127,10 @@ While encouraged, it is not necessary to define a type (e.g. a `struct`) in orde - Composite values will be treated as a `map[string]interface{}`, itself able to contain nested values (both scalar and compound) ad infinitum. - However, if there is a value (of any supported type) already present in a map for a given key, then it will be used when possible, rather than being replaced with a generic value as specified above; this makes it possible to handle partially typed, dynamic or schema-less values. +### Zero Values + +By default, and without custom marshaling, zero values (also known as empty/default values) are encoded as the empty string. To disable this behavior, meaning to keep zero values in their literal form (e.g. `0` for integral types), `Encoder` offers a `KeepZeros` setter method, which will do just that when set to `true`. + ### Unsupported Values Values of the following kinds aren't supported and, if present, must be ignored. diff --git a/vendor/github.com/ajg/form/TODO.md b/vendor/github.com/ajg/form/TODO.md index 672fd4657..d34472798 100644 --- a/vendor/github.com/ajg/form/TODO.md +++ b/vendor/github.com/ajg/form/TODO.md @@ -2,4 +2,3 @@ TODO ==== - Document IgnoreCase and IgnoreUnknownKeys in README. - - Fix want/have newlines in tests. 
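For illustration, the `KeepZeros` behaviour documented in the README change above boils down to the following minimal sketch; the `Query` type here is invented for the example, and only the `EncodeToString`, `NewEncoder`, and `KeepZeros` signatures visible in this vendor update are assumed:

```
package main

import (
	"fmt"
	"os"

	"github.com/ajg/form"
)

// Query deliberately leaves Page at its zero value.
type Query struct {
	Page  int `form:"page"`
	Limit int `form:"limit"`
}

func main() {
	q := Query{Limit: 25}

	// Default behaviour: zero values are encoded as the empty string.
	s, _ := form.EncodeToString(q)
	fmt.Println(s) // expected: limit=25&page=

	// KeepZeros(true) keeps zero values in their literal form instead.
	form.NewEncoder(os.Stdout).KeepZeros(true).Encode(q) // expected: limit=25&page=0
	fmt.Println()
}
```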
diff --git a/vendor/github.com/ajg/form/decode.go b/vendor/github.com/ajg/form/decode.go index 3346fffe5..dd8bd4f29 100644 --- a/vendor/github.com/ajg/form/decode.go +++ b/vendor/github.com/ajg/form/decode.go @@ -14,13 +14,13 @@ import ( "time" ) -// NewDecoder returns a new form decoder. -func NewDecoder(r io.Reader) *decoder { - return &decoder{r, defaultDelimiter, defaultEscape, false, false} +// NewDecoder returns a new form Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r, defaultDelimiter, defaultEscape, false, false} } -// decoder decodes data from a form (application/x-www-form-urlencoded). -type decoder struct { +// Decoder decodes data from a form (application/x-www-form-urlencoded). +type Decoder struct { r io.Reader d rune e rune @@ -28,20 +28,20 @@ type decoder struct { ignoreCase bool } -// DelimitWith sets r as the delimiter used for composite keys by decoder d and returns the latter; it is '.' by default. -func (d *decoder) DelimitWith(r rune) *decoder { +// DelimitWith sets r as the delimiter used for composite keys by Decoder d and returns the latter; it is '.' by default. +func (d *Decoder) DelimitWith(r rune) *Decoder { d.d = r return d } -// EscapeWith sets r as the escape used for delimiters (and to escape itself) by decoder d and returns the latter; it is '\\' by default. -func (d *decoder) EscapeWith(r rune) *decoder { +// EscapeWith sets r as the escape used for delimiters (and to escape itself) by Decoder d and returns the latter; it is '\\' by default. +func (d *Decoder) EscapeWith(r rune) *Decoder { d.e = r return d } // Decode reads in and decodes form-encoded data into dst. -func (d decoder) Decode(dst interface{}) error { +func (d Decoder) Decode(dst interface{}) error { bs, err := ioutil.ReadAll(d.r) if err != nil { return err @@ -54,20 +54,20 @@ func (d decoder) Decode(dst interface{}) error { return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v))) } -// IgnoreUnknownKeys if set to true it will make the decoder ignore values +// IgnoreUnknownKeys if set to true it will make the Decoder ignore values // that are not found in the destination object instead of returning an error. -func (d *decoder) IgnoreUnknownKeys(ignoreUnknown bool) { +func (d *Decoder) IgnoreUnknownKeys(ignoreUnknown bool) { d.ignoreUnknown = ignoreUnknown } -// IgnoreCase if set to true it will make the decoder try to set values in the +// IgnoreCase if set to true it will make the Decoder try to set values in the // destination object even if the case does not match. -func (d *decoder) IgnoreCase(ignoreCase bool) { +func (d *Decoder) IgnoreCase(ignoreCase bool) { d.ignoreCase = ignoreCase } // DecodeString decodes src into dst. -func (d decoder) DecodeString(dst interface{}, src string) error { +func (d Decoder) DecodeString(dst interface{}, src string) error { vs, err := url.ParseQuery(src) if err != nil { return err @@ -77,7 +77,7 @@ func (d decoder) DecodeString(dst interface{}, src string) error { } // DecodeValues decodes vs into dst. 
-func (d decoder) DecodeValues(dst interface{}, vs url.Values) error { +func (d Decoder) DecodeValues(dst interface{}, vs url.Values) error { v := reflect.ValueOf(dst) return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v))) } @@ -92,7 +92,7 @@ func DecodeValues(dst interface{}, vs url.Values) error { return NewDecoder(nil).DecodeValues(dst, vs) } -func (d decoder) decodeNode(v reflect.Value, n node) (err error) { +func (d Decoder) decodeNode(v reflect.Value, n node) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("%v", e) @@ -106,7 +106,7 @@ func (d decoder) decodeNode(v reflect.Value, n node) (err error) { return nil } -func (d decoder) decodeValue(v reflect.Value, x interface{}) { +func (d Decoder) decodeValue(v reflect.Value, x interface{}) { t := v.Type() k := v.Kind() @@ -163,7 +163,7 @@ func (d decoder) decodeValue(v reflect.Value, x interface{}) { } } -func (d decoder) decodeStruct(v reflect.Value, x interface{}) { +func (d Decoder) decodeStruct(v reflect.Value, x interface{}) { t := v.Type() for k, c := range getNode(x) { if f, ok := findField(v, k, d.ignoreCase); !ok && k == "" { @@ -180,7 +180,7 @@ func (d decoder) decodeStruct(v reflect.Value, x interface{}) { } } -func (d decoder) decodeMap(v reflect.Value, x interface{}) { +func (d Decoder) decodeMap(v reflect.Value, x interface{}) { t := v.Type() if v.IsNil() { v.Set(reflect.MakeMap(t)) @@ -216,7 +216,7 @@ func (d decoder) decodeMap(v reflect.Value, x interface{}) { } } -func (d decoder) decodeArray(v reflect.Value, x interface{}) { +func (d Decoder) decodeArray(v reflect.Value, x interface{}) { t := v.Type() for k, c := range getNode(x) { i, err := strconv.Atoi(k) @@ -230,7 +230,7 @@ func (d decoder) decodeArray(v reflect.Value, x interface{}) { } } -func (d decoder) decodeSlice(v reflect.Value, x interface{}) { +func (d Decoder) decodeSlice(v reflect.Value, x interface{}) { t := v.Type() if t.Elem().Kind() == reflect.Uint8 { // Allow, but don't require, byte slices to be encoded as a single string. @@ -265,7 +265,7 @@ func (d decoder) decodeSlice(v reflect.Value, x interface{}) { } } -func (d decoder) decodeBasic(v reflect.Value, x interface{}) { +func (d Decoder) decodeBasic(v reflect.Value, x interface{}) { t := v.Type() switch k, s := t.Kind(), getString(x); k { case reflect.Bool: @@ -316,7 +316,7 @@ func (d decoder) decodeBasic(v reflect.Value, x interface{}) { } } -func (d decoder) decodeTime(v reflect.Value, x interface{}) { +func (d Decoder) decodeTime(v reflect.Value, x interface{}) { t := v.Type() s := getString(x) // TODO: Find a more efficient way to do this. @@ -329,7 +329,7 @@ func (d decoder) decodeTime(v reflect.Value, x interface{}) { panic("cannot decode string `" + s + "` as " + t.String()) } -func (d decoder) decodeURL(v reflect.Value, x interface{}) { +func (d Decoder) decodeURL(v reflect.Value, x interface{}) { t := v.Type() s := getString(x) if u, err := url.Parse(s); err == nil { diff --git a/vendor/github.com/ajg/form/encode.go b/vendor/github.com/ajg/form/encode.go index 3e824c6c6..57a0d0a57 100644 --- a/vendor/github.com/ajg/form/encode.go +++ b/vendor/github.com/ajg/form/encode.go @@ -16,34 +16,41 @@ import ( "time" ) -// NewEncoder returns a new form encoder. -func NewEncoder(w io.Writer) *encoder { - return &encoder{w, defaultDelimiter, defaultEscape} +// NewEncoder returns a new form Encoder. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w, defaultDelimiter, defaultEscape, false} } -// encoder provides a way to encode to a Writer. 
-type encoder struct { +// Encoder provides a way to encode to a Writer. +type Encoder struct { w io.Writer d rune e rune + z bool } -// DelimitWith sets r as the delimiter used for composite keys by encoder e and returns the latter; it is '.' by default. -func (e *encoder) DelimitWith(r rune) *encoder { +// DelimitWith sets r as the delimiter used for composite keys by Encoder e and returns the latter; it is '.' by default. +func (e *Encoder) DelimitWith(r rune) *Encoder { e.d = r return e } -// EscapeWith sets r as the escape used for delimiters (and to escape itself) by encoder e and returns the latter; it is '\\' by default. -func (e *encoder) EscapeWith(r rune) *encoder { +// EscapeWith sets r as the escape used for delimiters (and to escape itself) by Encoder e and returns the latter; it is '\\' by default. +func (e *Encoder) EscapeWith(r rune) *Encoder { e.e = r return e } -// Encode encodes dst as form and writes it out using the encoder's Writer. -func (e encoder) Encode(dst interface{}) error { +// KeepZeros sets whether Encoder e should keep zero (default) values in their literal form when encoding, and returns the former; by default zero values are not kept, but are rather encoded as the empty string. +func (e *Encoder) KeepZeros(z bool) *Encoder { + e.z = z + return e +} + +// Encode encodes dst as form and writes it out using the Encoder's Writer. +func (e Encoder) Encode(dst interface{}) error { v := reflect.ValueOf(dst) - n, err := encodeToNode(v) + n, err := encodeToNode(v, e.z) if err != nil { return err } @@ -61,7 +68,7 @@ func (e encoder) Encode(dst interface{}) error { // EncodeToString encodes dst as a form and returns it as a string. func EncodeToString(dst interface{}) (string, error) { v := reflect.ValueOf(dst) - n, err := encodeToNode(v) + n, err := encodeToNode(v, false) if err != nil { return "", err } @@ -72,7 +79,7 @@ func EncodeToString(dst interface{}) (string, error) { // EncodeToValues encodes dst as a form and returns it as Values. func EncodeToValues(dst interface{}) (url.Values, error) { v := reflect.ValueOf(dst) - n, err := encodeToNode(v) + n, err := encodeToNode(v, false) if err != nil { return nil, err } @@ -80,41 +87,41 @@ func EncodeToValues(dst interface{}) (url.Values, error) { return vs, nil } -func encodeToNode(v reflect.Value) (n node, err error) { +func encodeToNode(v reflect.Value, z bool) (n node, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("%v", e) } }() - return getNode(encodeValue(v)), nil + return getNode(encodeValue(v, z)), nil } -func encodeValue(v reflect.Value) interface{} { +func encodeValue(v reflect.Value, z bool) interface{} { t := v.Type() k := v.Kind() if s, ok := marshalValue(v); ok { return s - } else if isEmptyValue(v) { + } else if !z && isEmptyValue(v) { return "" // Treat the zero value as the empty string. 
} switch k { case reflect.Ptr, reflect.Interface: - return encodeValue(v.Elem()) + return encodeValue(v.Elem(), z) case reflect.Struct: if t.ConvertibleTo(timeType) { return encodeTime(v) } else if t.ConvertibleTo(urlType) { return encodeURL(v) } - return encodeStruct(v) + return encodeStruct(v, z) case reflect.Slice: - return encodeSlice(v) + return encodeSlice(v, z) case reflect.Array: - return encodeArray(v) + return encodeArray(v, z) case reflect.Map: - return encodeMap(v) + return encodeMap(v, z) case reflect.Invalid, reflect.Uintptr, reflect.UnsafePointer, reflect.Chan, reflect.Func: panic(t.String() + " has unsupported kind " + t.Kind().String()) default: @@ -122,7 +129,7 @@ func encodeValue(v reflect.Value) interface{} { } } -func encodeStruct(v reflect.Value) interface{} { +func encodeStruct(v reflect.Value, z bool) interface{} { t := v.Type() n := node{} for i := 0; i < t.NumField(); i++ { @@ -134,37 +141,37 @@ func encodeStruct(v reflect.Value) interface{} { } else if fv := v.Field(i); oe && isEmptyValue(fv) { delete(n, k) } else { - n[k] = encodeValue(fv) + n[k] = encodeValue(fv, z) } } return n } -func encodeMap(v reflect.Value) interface{} { +func encodeMap(v reflect.Value, z bool) interface{} { n := node{} for _, i := range v.MapKeys() { - k := getString(encodeValue(i)) - n[k] = encodeValue(v.MapIndex(i)) + k := getString(encodeValue(i, z)) + n[k] = encodeValue(v.MapIndex(i), z) } return n } -func encodeArray(v reflect.Value) interface{} { +func encodeArray(v reflect.Value, z bool) interface{} { n := node{} for i := 0; i < v.Len(); i++ { - n[strconv.Itoa(i)] = encodeValue(v.Index(i)) + n[strconv.Itoa(i)] = encodeValue(v.Index(i), z) } return n } -func encodeSlice(v reflect.Value) interface{} { +func encodeSlice(v reflect.Value, z bool) interface{} { t := v.Type() if t.Elem().Kind() == reflect.Uint8 { return string(v.Bytes()) // Encode byte slices as a single string by default. } n := node{} for i := 0; i < v.Len(); i++ { - n[strconv.Itoa(i)] = encodeValue(v.Index(i)) + n[strconv.Itoa(i)] = encodeValue(v.Index(i), z) } return n } diff --git a/vendor/github.com/sethvargo/go-fastly/Makefile b/vendor/github.com/sethvargo/go-fastly/Makefile index 2addc380e..14a75743c 100644 --- a/vendor/github.com/sethvargo/go-fastly/Makefile +++ b/vendor/github.com/sethvargo/go-fastly/Makefile @@ -1,42 +1,62 @@ -TEST?=./... -NAME?=$(shell basename "${CURDIR}") -EXTERNAL_TOOLS=\ - github.com/mitchellh/gox +# Metadata about this makefile and position +MKFILE_PATH := $(lastword $(MAKEFILE_LIST)) +CURRENT_DIR := $(dir $(realpath $(MKFILE_PATH))) +CURRENT_DIR := $(CURRENT_DIR:/=) -default: test +# Get the project metadata +GOVERSION := 1.8 +PROJECT := github.com/sethvargo/go-fastly +OWNER := $(dir $(PROJECT)) +OWNER := $(notdir $(OWNER:/=)) +NAME := $(notdir $(PROJECT)) +EXTERNAL_TOOLS = -# test runs the test suite and vets the code. -test: generate - @echo "==> Running tests..." - @go list $(TEST) \ - | grep -v "github.com/sethvargo/${NAME}/vendor" \ - | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} +# List of tests to run +TEST ?= ./... -# testrace runs the race checker -testrace: generate - @echo "==> Running tests (race)..." - @go list $(TEST) \ - | grep -v "github.com/sethvargo/${NAME}/vendor" \ - | xargs -n1 go test -timeout=60s -race ${TESTARGS} +# List all our actual files, excluding vendor +GOFILES = $(shell go list $(TEST) | grep -v /vendor/) -# updatedeps installs all the dependencies needed to run and build. 
-updatedeps: - @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" +# Tags specific for building +GOTAGS ?= -# generate runs `go generate` to build the dynamically generated source files. -generate: - @echo "==> Generating..." - @find . -type f -name '.DS_Store' -delete - @go list ./... \ - | grep -v "github.com/hashicorp/${NAME}/vendor" \ - | xargs -n1 go generate +# Number of procs to use +GOMAXPROCS ?= 4 -# bootstrap installs the necessary go tools for development/build. +# bootstrap installs the necessary go tools for development or build bootstrap: - @echo "==> Bootstrapping..." + @echo "==> Bootstrapping ${PROJECT}..." @for t in ${EXTERNAL_TOOLS}; do \ - echo "--> Installing "$$t"..." ; \ + echo "--> Installing $$t" ; \ go get -u "$$t"; \ done -.PHONY: default test testrace updatedeps generate bootstrap +# deps gets all the dependencies for this repository and vendors them. +deps: + @echo "==> Updating dependencies..." + @docker run \ + --interactive \ + --tty \ + --rm \ + --dns=8.8.8.8 \ + --env="GOMAXPROCS=${GOMAXPROCS}" \ + --workdir="/go/src/${PROJECT}" \ + --volume="${CURRENT_DIR}:/go/src/${PROJECT}" \ + "golang:${GOVERSION}" /usr/bin/env sh -c "scripts/deps.sh" + +# generate runs the code generator +generate: + @echo "==> Generating ${PROJECT}..." + @go generate ${GOFILES} + +# test runs the test suite +test: + @echo "==> Testing ${PROJECT}..." + @go test -timeout=60s -parallel=20 -tags="${GOTAGS}" ${GOFILES} ${TESTARGS} + +# test-race runs the race checker +test-race: + @echo "==> Testing ${PROJECT} (race)..." + @go test -timeout=60s -race -tags="${GOTAGS}" ${GOFILES} ${TESTARGS} + +.PHONY: bootstrap deps generate test test-race diff --git a/vendor/github.com/sethvargo/go-fastly/backend.go b/vendor/github.com/sethvargo/go-fastly/backend.go index 0d08445fa..0e2642ece 100644 --- a/vendor/github.com/sethvargo/go-fastly/backend.go +++ b/vendor/github.com/sethvargo/go-fastly/backend.go @@ -8,7 +8,7 @@ import ( // Backend represents a backend response from the Fastly API. type Backend struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Address string `mapstructure:"address"` @@ -53,7 +53,7 @@ type ListBackendsInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListBackends returns the list of backends for the configuration version. @@ -62,11 +62,11 @@ func (c *Client) ListBackends(i *ListBackendsInput) ([]*Backend, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/backend", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/backend", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -85,7 +85,7 @@ type CreateBackendInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. 
Service string - Version string + Version int Name string `form:"name,omitempty"` Address string `form:"address,omitempty"` @@ -119,11 +119,11 @@ func (c *Client) CreateBackend(i *CreateBackendInput) (*Backend, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/backend", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/backend", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -141,7 +141,7 @@ type GetBackendInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the backend to fetch. Name string @@ -153,7 +153,7 @@ func (c *Client) GetBackend(i *GetBackendInput) (*Backend, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -161,7 +161,7 @@ func (c *Client) GetBackend(i *GetBackendInput) (*Backend, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/backend/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/backend/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -179,7 +179,7 @@ type UpdateBackendInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the backend to update. Name string @@ -216,7 +216,7 @@ func (c *Client) UpdateBackend(i *UpdateBackendInput) (*Backend, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -224,7 +224,7 @@ func (c *Client) UpdateBackend(i *UpdateBackendInput) (*Backend, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/backend/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/backend/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -242,7 +242,7 @@ type DeleteBackendInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the backend to delete (required). Name string @@ -254,7 +254,7 @@ func (c *Client) DeleteBackend(i *DeleteBackendInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -262,7 +262,7 @@ func (c *Client) DeleteBackend(i *DeleteBackendInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/backend/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/backend/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/cache_setting.go b/vendor/github.com/sethvargo/go-fastly/cache_setting.go index 79ba5c64c..5a90f9b0e 100644 --- a/vendor/github.com/sethvargo/go-fastly/cache_setting.go +++ b/vendor/github.com/sethvargo/go-fastly/cache_setting.go @@ -22,7 +22,7 @@ type CacheSettingAction string // CacheSetting represents a response from Fastly's API for cache settings. 
type CacheSetting struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Action CacheSettingAction `mapstructure:"action"` @@ -47,7 +47,7 @@ type ListCacheSettingsInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListCacheSettings returns the list of cache settings for the configuration @@ -57,11 +57,11 @@ func (c *Client) ListCacheSettings(i *ListCacheSettingsInput) ([]*CacheSetting, return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/cache_settings", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/cache_settings", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -80,7 +80,7 @@ type CreateCacheSettingInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` Action CacheSettingAction `form:"action,omitempty"` @@ -95,11 +95,11 @@ func (c *Client) CreateCacheSetting(i *CreateCacheSettingInput) (*CacheSetting, return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/cache_settings", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/cache_settings", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -117,7 +117,7 @@ type GetCacheSettingInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the cache setting to fetch. Name string @@ -130,7 +130,7 @@ func (c *Client) GetCacheSetting(i *GetCacheSettingInput) (*CacheSetting, error) return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -138,7 +138,7 @@ func (c *Client) GetCacheSetting(i *GetCacheSettingInput) (*CacheSetting, error) return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/cache_settings/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/cache_settings/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -156,7 +156,7 @@ type UpdateCacheSettingInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the cache setting to update. Name string @@ -174,7 +174,7 @@ func (c *Client) UpdateCacheSetting(i *UpdateCacheSettingInput) (*CacheSetting, return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -182,7 +182,7 @@ func (c *Client) UpdateCacheSetting(i *UpdateCacheSettingInput) (*CacheSetting, return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/cache_settings/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/cache_settings/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -200,7 +200,7 @@ type DeleteCacheSettingInput struct { // Service is the ID of the service. 
Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the cache setting to delete (required). Name string @@ -212,7 +212,7 @@ func (c *Client) DeleteCacheSetting(i *DeleteCacheSettingInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -220,7 +220,7 @@ func (c *Client) DeleteCacheSetting(i *DeleteCacheSettingInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/cache_settings/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/cache_settings/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/client.go b/vendor/github.com/sethvargo/go-fastly/client.go index 4c670601f..30a54a09c 100644 --- a/vendor/github.com/sethvargo/go-fastly/client.go +++ b/vendor/github.com/sethvargo/go-fastly/client.go @@ -64,20 +64,24 @@ func DefaultClient() *Client { return client } -// NewClient creates a new API client with the given key. Because Fastly allows -// some requests without an API key, this function will not error if the API -// token is not supplied. Attempts to make a request that requires an API key -// will return a 403 response. +// NewClient creates a new API client with the given key and the default API +// endpoint. Because Fastly allows some requests without an API key, this +// function will not error if the API token is not supplied. Attempts to make a +// request that requires an API key will return a 403 response. func NewClient(key string) (*Client, error) { - client := &Client{apiKey: key} + return NewClientForEndpoint(key, DefaultEndpoint) +} + +// NewClientForEndpoint creates a new API client with the given key and API +// endpoint. Because Fastly allows some requests without an API key, this +// function will not error if the API token is not supplied. Attempts to make a +// request that requires an API key will return a 403 response. +func NewClientForEndpoint(key string, endpoint string) (*Client, error) { + client := &Client{apiKey: key, Address: endpoint} return client.init() } func (c *Client) init() (*Client, error) { - if len(c.Address) == 0 { - c.Address = DefaultEndpoint - } - u, err := url.Parse(c.Address) if err != nil { return nil, err @@ -155,7 +159,7 @@ func (c *Client) RequestForm(verb, p string, i interface{}, ro *RequestOptions) ro.Headers["Content-Type"] = "application/x-www-form-urlencoded" buf := new(bytes.Buffer) - if err := form.NewEncoder(buf).DelimitWith('|').Encode(i); err != nil { + if err := form.NewEncoder(buf).KeepZeros(true).DelimitWith('|').Encode(i); err != nil { return nil, err } body := buf.String() diff --git a/vendor/github.com/sethvargo/go-fastly/condition.go b/vendor/github.com/sethvargo/go-fastly/condition.go index b88b61ad0..43a7c70c2 100644 --- a/vendor/github.com/sethvargo/go-fastly/condition.go +++ b/vendor/github.com/sethvargo/go-fastly/condition.go @@ -8,7 +8,7 @@ import ( // Condition represents a condition response from the Fastly API. type Condition struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Statement string `mapstructure:"statement"` @@ -32,7 +32,7 @@ type ListConditionsInput struct { Service string // Version is the specific configuration version (required). 
- Version string + Version int } // ListConditions returns the list of conditions for the configuration version. @@ -41,11 +41,11 @@ func (c *Client) ListConditions(i *ListConditionsInput) ([]*Condition, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/condition", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/condition", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -64,7 +64,7 @@ type CreateConditionInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` Statement string `form:"statement,omitempty"` @@ -78,11 +78,11 @@ func (c *Client) CreateCondition(i *CreateConditionInput) (*Condition, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/condition", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/condition", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -100,7 +100,7 @@ type GetConditionInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the condition to fetch. Name string @@ -112,7 +112,7 @@ func (c *Client) GetCondition(i *GetConditionInput) (*Condition, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -120,7 +120,7 @@ func (c *Client) GetCondition(i *GetConditionInput) (*Condition, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/condition/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/condition/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -138,7 +138,7 @@ type UpdateConditionInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the condition to update. Name string @@ -154,7 +154,7 @@ func (c *Client) UpdateCondition(i *UpdateConditionInput) (*Condition, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -162,7 +162,7 @@ func (c *Client) UpdateCondition(i *UpdateConditionInput) (*Condition, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/condition/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/condition/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -180,7 +180,7 @@ type DeleteConditionInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the condition to delete (required). 
Name string @@ -192,7 +192,7 @@ func (c *Client) DeleteCondition(i *DeleteConditionInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -200,7 +200,7 @@ func (c *Client) DeleteCondition(i *DeleteConditionInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/condition/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/condition/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/dictionary.go b/vendor/github.com/sethvargo/go-fastly/dictionary.go index c3d883802..2a1ae2518 100644 --- a/vendor/github.com/sethvargo/go-fastly/dictionary.go +++ b/vendor/github.com/sethvargo/go-fastly/dictionary.go @@ -8,7 +8,7 @@ import ( // Dictionary represents a dictionary response from the Fastly API. type Dictionary struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` ID string `mapstructure:"id"` Name string `mapstructure:"name"` @@ -31,7 +31,7 @@ type ListDictionariesInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListDictionaries returns the list of dictionaries for the configuration version. @@ -40,11 +40,11 @@ func (c *Client) ListDictionaries(i *ListDictionariesInput) ([]*Dictionary, erro return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/dictionary", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/dictionary", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -63,7 +63,7 @@ type CreateDictionaryInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` } @@ -74,11 +74,11 @@ func (c *Client) CreateDictionary(i *CreateDictionaryInput) (*Dictionary, error) return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/dictionary", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/dictionary", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -96,7 +96,7 @@ type GetDictionaryInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the dictionary to fetch. Name string @@ -108,7 +108,7 @@ func (c *Client) GetDictionary(i *GetDictionaryInput) (*Dictionary, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -116,7 +116,7 @@ func (c *Client) GetDictionary(i *GetDictionaryInput) (*Dictionary, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/dictionary/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/dictionary/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -134,7 +134,7 @@ type UpdateDictionaryInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. 
Service string - Version string + Version int // Name is the name of the dictionary to update. Name string @@ -148,7 +148,7 @@ func (c *Client) UpdateDictionary(i *UpdateDictionaryInput) (*Dictionary, error) return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -156,7 +156,7 @@ func (c *Client) UpdateDictionary(i *UpdateDictionaryInput) (*Dictionary, error) return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/dictionary/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/dictionary/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -174,7 +174,7 @@ type DeleteDictionaryInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the dictionary to delete (required). Name string @@ -186,7 +186,7 @@ func (c *Client) DeleteDictionary(i *DeleteDictionaryInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -194,7 +194,7 @@ func (c *Client) DeleteDictionary(i *DeleteDictionaryInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/dictionary/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/dictionary/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/diff.go b/vendor/github.com/sethvargo/go-fastly/diff.go index 3cb07b94b..63e798825 100644 --- a/vendor/github.com/sethvargo/go-fastly/diff.go +++ b/vendor/github.com/sethvargo/go-fastly/diff.go @@ -5,8 +5,8 @@ import "fmt" // Diff represents a diff of two versions as a response from the Fastly API. type Diff struct { Format string `mapstructure:"format"` - From string `mapstructure:"from"` - To string `mapstructure:"to"` + From int `mapstructure:"from"` + To int `mapstructure:"to"` Diff string `mapstructure:"diff"` } @@ -18,10 +18,10 @@ type GetDiffInput struct { // From is the version to diff from. This can either be a string indicating a // positive number (e.g. "1") or a negative number from "-1" down ("-1" is the // latest version). - From string + From int // To is the version to diff up to. The same rules for From apply. - To string + To int // Format is an optional field to specify the format with which the diff will // be returned. Acceptable values are "text" (default), "html", or @@ -35,15 +35,15 @@ func (c *Client) GetDiff(i *GetDiffInput) (*Diff, error) { return nil, ErrMissingService } - if i.From == "" { + if i.From == 0 { return nil, ErrMissingFrom } - if i.To == "" { + if i.To == 0 { return nil, ErrMissingTo } - path := fmt.Sprintf("service/%s/diff/from/%s/to/%s", i.Service, i.From, i.To) + path := fmt.Sprintf("service/%s/diff/from/%d/to/%d", i.Service, i.From, i.To) resp, err := c.Get(path, nil) if err != nil { return nil, err diff --git a/vendor/github.com/sethvargo/go-fastly/director.go b/vendor/github.com/sethvargo/go-fastly/director.go index d5c3dee63..418774c31 100644 --- a/vendor/github.com/sethvargo/go-fastly/director.go +++ b/vendor/github.com/sethvargo/go-fastly/director.go @@ -25,7 +25,7 @@ type DirectorType uint8 // Director represents a director response from the Fastly API. 
type Director struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Comment string `mapstructure:"comment"` @@ -51,7 +51,7 @@ type ListDirectorsInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListDirectors returns the list of directors for the configuration version. @@ -60,11 +60,11 @@ func (c *Client) ListDirectors(i *ListDirectorsInput) ([]*Director, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/director", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/director", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -83,7 +83,7 @@ type CreateDirectorInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` Comment string `form:"comment,omitempty"` @@ -98,11 +98,11 @@ func (c *Client) CreateDirector(i *CreateDirectorInput) (*Director, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/director", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/director", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -120,7 +120,7 @@ type GetDirectorInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the director to fetch. Name string @@ -132,7 +132,7 @@ func (c *Client) GetDirector(i *GetDirectorInput) (*Director, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -140,7 +140,7 @@ func (c *Client) GetDirector(i *GetDirectorInput) (*Director, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/director/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/director/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -158,7 +158,7 @@ type UpdateDirectorInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the director to update. Name string @@ -175,7 +175,7 @@ func (c *Client) UpdateDirector(i *UpdateDirectorInput) (*Director, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -183,7 +183,7 @@ func (c *Client) UpdateDirector(i *UpdateDirectorInput) (*Director, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/director/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/director/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -201,7 +201,7 @@ type DeleteDirectorInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. 
Service string - Version string + Version int // Name is the name of the director to delete (required). Name string @@ -213,7 +213,7 @@ func (c *Client) DeleteDirector(i *DeleteDirectorInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -221,7 +221,7 @@ func (c *Client) DeleteDirector(i *DeleteDirectorInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/director/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/director/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/director_backend.go b/vendor/github.com/sethvargo/go-fastly/director_backend.go index 5cbde76b2..7ca6bcdda 100644 --- a/vendor/github.com/sethvargo/go-fastly/director_backend.go +++ b/vendor/github.com/sethvargo/go-fastly/director_backend.go @@ -9,7 +9,7 @@ import ( // Fastly API. type DirectorBackend struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Director string `mapstructure:"director_name"` Backend string `mapstructure:"backend_name"` @@ -24,7 +24,7 @@ type CreateDirectorBackendInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Director is the name of the director (required). Director string @@ -39,7 +39,7 @@ func (c *Client) CreateDirectorBackend(i *CreateDirectorBackendInput) (*Director return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -51,7 +51,7 @@ func (c *Client) CreateDirectorBackend(i *CreateDirectorBackendInput) (*Director return nil, ErrMissingBackend } - path := fmt.Sprintf("/service/%s/version/%s/director/%s/backend/%s", + path := fmt.Sprintf("/service/%s/version/%d/director/%s/backend/%s", i.Service, i.Version, i.Director, i.Backend) resp, err := c.PostForm(path, i, nil) if err != nil { @@ -70,7 +70,7 @@ type GetDirectorBackendInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Director is the name of the director (required). Director string @@ -85,7 +85,7 @@ func (c *Client) GetDirectorBackend(i *GetDirectorBackendInput) (*DirectorBacken return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -97,7 +97,7 @@ func (c *Client) GetDirectorBackend(i *GetDirectorBackendInput) (*DirectorBacken return nil, ErrMissingBackend } - path := fmt.Sprintf("/service/%s/version/%s/director/%s/backend/%s", + path := fmt.Sprintf("/service/%s/version/%d/director/%s/backend/%s", i.Service, i.Version, i.Director, i.Backend) resp, err := c.Get(path, nil) if err != nil { @@ -116,7 +116,7 @@ type DeleteDirectorBackendInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Director is the name of the director (required). 
Director string @@ -131,7 +131,7 @@ func (c *Client) DeleteDirectorBackend(i *DeleteDirectorBackendInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -143,7 +143,7 @@ func (c *Client) DeleteDirectorBackend(i *DeleteDirectorBackendInput) error { return ErrMissingBackend } - path := fmt.Sprintf("/service/%s/version/%s/director/%s/backend/%s", + path := fmt.Sprintf("/service/%s/version/%d/director/%s/backend/%s", i.Service, i.Version, i.Director, i.Backend) resp, err := c.Delete(path, nil) if err != nil { diff --git a/vendor/github.com/sethvargo/go-fastly/domain.go b/vendor/github.com/sethvargo/go-fastly/domain.go index 56356f8eb..b41c2262f 100644 --- a/vendor/github.com/sethvargo/go-fastly/domain.go +++ b/vendor/github.com/sethvargo/go-fastly/domain.go @@ -8,7 +8,7 @@ import ( // Domain represents the the domain name Fastly will serve content for. type Domain struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Comment string `mapstructure:"comment"` @@ -30,7 +30,7 @@ type ListDomainsInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int } // ListDomains returns the list of domains for this Service. @@ -39,11 +39,11 @@ func (c *Client) ListDomains(i *ListDomainsInput) ([]*Domain, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/domain", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/domain", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -62,7 +62,7 @@ type CreateDomainInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the domain that the service will respond to (required). Name string `form:"name"` @@ -77,11 +77,11 @@ func (c *Client) CreateDomain(i *CreateDomainInput) (*Domain, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/domain", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/domain", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -99,7 +99,7 @@ type GetDomainInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the domain to fetch. Name string `form:"name"` @@ -111,7 +111,7 @@ func (c *Client) GetDomain(i *GetDomainInput) (*Domain, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -119,7 +119,7 @@ func (c *Client) GetDomain(i *GetDomainInput) (*Domain, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/domain/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/domain/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -137,7 +137,7 @@ type UpdateDomainInput struct { // Service is the ID of the service. Version is the specific configuration // version. 
Both fields are required. Service string - Version string + Version int // Name is the name of the domain that the service will respond to (required). Name string @@ -156,7 +156,7 @@ func (c *Client) UpdateDomain(i *UpdateDomainInput) (*Domain, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -164,7 +164,7 @@ func (c *Client) UpdateDomain(i *UpdateDomainInput) (*Domain, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/domain/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/domain/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -182,7 +182,7 @@ type DeleteDomainInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the domain that the service will respond to (required). Name string `form:"name"` @@ -194,7 +194,7 @@ func (c *Client) DeleteDomain(i *DeleteDomainInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -202,7 +202,7 @@ func (c *Client) DeleteDomain(i *DeleteDomainInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/domain/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/domain/%s", i.Service, i.Version, i.Name) _, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/ftp.go b/vendor/github.com/sethvargo/go-fastly/ftp.go index 95cdf828e..5bdc10119 100644 --- a/vendor/github.com/sethvargo/go-fastly/ftp.go +++ b/vendor/github.com/sethvargo/go-fastly/ftp.go @@ -9,7 +9,7 @@ import ( // FTP represents an FTP logging response from the Fastly API. type FTP struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Address string `mapstructure:"address"` @@ -43,7 +43,7 @@ type ListFTPsInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListFTPs returns the list of ftps for the configuration version. @@ -52,11 +52,11 @@ func (c *Client) ListFTPs(i *ListFTPsInput) ([]*FTP, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/ftp", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/ftp", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -75,7 +75,7 @@ type CreateFTPInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` Address string `form:"address,omitempty"` @@ -96,11 +96,11 @@ func (c *Client) CreateFTP(i *CreateFTPInput) (*FTP, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/ftp", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/ftp", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -118,7 +118,7 @@ type GetFTPInput struct { // Service is the ID of the service. 
Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the FTP to fetch. Name string @@ -130,7 +130,7 @@ func (c *Client) GetFTP(i *GetFTPInput) (*FTP, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -138,7 +138,7 @@ func (c *Client) GetFTP(i *GetFTPInput) (*FTP, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/ftp/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/ftp/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -156,7 +156,7 @@ type UpdateFTPInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the FTP to update. Name string @@ -180,7 +180,7 @@ func (c *Client) UpdateFTP(i *UpdateFTPInput) (*FTP, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -188,7 +188,7 @@ func (c *Client) UpdateFTP(i *UpdateFTPInput) (*FTP, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/ftp/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/ftp/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -206,7 +206,7 @@ type DeleteFTPInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the FTP to delete (required). Name string @@ -218,7 +218,7 @@ func (c *Client) DeleteFTP(i *DeleteFTPInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -226,7 +226,7 @@ func (c *Client) DeleteFTP(i *DeleteFTPInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/ftp/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/ftp/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/gcs.go b/vendor/github.com/sethvargo/go-fastly/gcs.go index ef813ac51..950fa7e7f 100644 --- a/vendor/github.com/sethvargo/go-fastly/gcs.go +++ b/vendor/github.com/sethvargo/go-fastly/gcs.go @@ -8,7 +8,7 @@ import ( // GCS represents an GCS logging response from the Fastly API. type GCS struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Bucket string `mapstructure:"bucket_name"` @@ -38,7 +38,7 @@ type ListGCSsInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListGCSs returns the list of gcses for the configuration version. 
@@ -47,11 +47,11 @@ func (c *Client) ListGCSs(i *ListGCSsInput) ([]*GCS, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/gcs", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/gcs", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -70,7 +70,7 @@ type CreateGCSInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` Bucket string `form:"bucket_name,omitempty"` @@ -90,11 +90,11 @@ func (c *Client) CreateGCS(i *CreateGCSInput) (*GCS, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/gcs", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/gcs", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -112,7 +112,7 @@ type GetGCSInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the GCS to fetch. Name string @@ -124,7 +124,7 @@ func (c *Client) GetGCS(i *GetGCSInput) (*GCS, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -132,7 +132,7 @@ func (c *Client) GetGCS(i *GetGCSInput) (*GCS, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/gcs/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/gcs/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -150,7 +150,7 @@ type UpdateGCSInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the GCS to update. Name string @@ -173,7 +173,7 @@ func (c *Client) UpdateGCS(i *UpdateGCSInput) (*GCS, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -181,7 +181,7 @@ func (c *Client) UpdateGCS(i *UpdateGCSInput) (*GCS, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/gcs/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/gcs/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -199,7 +199,7 @@ type DeleteGCSInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the GCS to delete (required). 
Name string @@ -211,7 +211,7 @@ func (c *Client) DeleteGCS(i *DeleteGCSInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -219,7 +219,7 @@ func (c *Client) DeleteGCS(i *DeleteGCSInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/gcs/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/gcs/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/gzip.go b/vendor/github.com/sethvargo/go-fastly/gzip.go index 2b9f6d80b..002d454dc 100644 --- a/vendor/github.com/sethvargo/go-fastly/gzip.go +++ b/vendor/github.com/sethvargo/go-fastly/gzip.go @@ -8,7 +8,7 @@ import ( // Gzip represents an Gzip logging response from the Fastly API. type Gzip struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` ContentTypes string `mapstructure:"content_types"` @@ -32,7 +32,7 @@ type ListGzipsInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListGzips returns the list of gzips for the configuration version. @@ -41,11 +41,11 @@ func (c *Client) ListGzips(i *ListGzipsInput) ([]*Gzip, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/gzip", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/gzip", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -64,7 +64,7 @@ type CreateGzipInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` ContentTypes string `form:"content_types"` @@ -78,11 +78,11 @@ func (c *Client) CreateGzip(i *CreateGzipInput) (*Gzip, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/gzip", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/gzip", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -100,7 +100,7 @@ type GetGzipInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the Gzip to fetch. Name string @@ -112,7 +112,7 @@ func (c *Client) GetGzip(i *GetGzipInput) (*Gzip, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -120,7 +120,7 @@ func (c *Client) GetGzip(i *GetGzipInput) (*Gzip, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/gzip/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/gzip/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -138,7 +138,7 @@ type UpdateGzipInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the Gzip to update. 
Name string @@ -155,7 +155,7 @@ func (c *Client) UpdateGzip(i *UpdateGzipInput) (*Gzip, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -163,7 +163,7 @@ func (c *Client) UpdateGzip(i *UpdateGzipInput) (*Gzip, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/gzip/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/gzip/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -181,7 +181,7 @@ type DeleteGzipInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the Gzip to delete (required). Name string @@ -193,7 +193,7 @@ func (c *Client) DeleteGzip(i *DeleteGzipInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -201,7 +201,7 @@ func (c *Client) DeleteGzip(i *DeleteGzipInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/gzip/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/gzip/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/header.go b/vendor/github.com/sethvargo/go-fastly/header.go index 21bcc0cc1..d4af0d37a 100644 --- a/vendor/github.com/sethvargo/go-fastly/header.go +++ b/vendor/github.com/sethvargo/go-fastly/header.go @@ -51,7 +51,7 @@ type HeaderType string // Header represents a header response from the Fastly API. type Header struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Action HeaderAction `mapstructure:"action"` @@ -83,7 +83,7 @@ type ListHeadersInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListHeaders returns the list of headers for the configuration version. @@ -92,11 +92,11 @@ func (c *Client) ListHeaders(i *ListHeadersInput) ([]*Header, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/header", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/header", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -115,7 +115,7 @@ type CreateHeaderInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` Action HeaderAction `form:"action,omitempty"` @@ -137,11 +137,11 @@ func (c *Client) CreateHeader(i *CreateHeaderInput) (*Header, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/header", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/header", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -159,7 +159,7 @@ type GetHeaderInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the header to fetch. 
Name string @@ -171,7 +171,7 @@ func (c *Client) GetHeader(i *GetHeaderInput) (*Header, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -179,7 +179,7 @@ func (c *Client) GetHeader(i *GetHeaderInput) (*Header, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/header/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/header/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -197,7 +197,7 @@ type UpdateHeaderInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the header to update. Name string @@ -222,7 +222,7 @@ func (c *Client) UpdateHeader(i *UpdateHeaderInput) (*Header, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -230,7 +230,7 @@ func (c *Client) UpdateHeader(i *UpdateHeaderInput) (*Header, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/header/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/header/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -248,7 +248,7 @@ type DeleteHeaderInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the header to delete (required). Name string @@ -260,7 +260,7 @@ func (c *Client) DeleteHeader(i *DeleteHeaderInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -268,7 +268,7 @@ func (c *Client) DeleteHeader(i *DeleteHeaderInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/header/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/header/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/health_check.go b/vendor/github.com/sethvargo/go-fastly/health_check.go index 5a091d2c2..a264c3dd4 100644 --- a/vendor/github.com/sethvargo/go-fastly/health_check.go +++ b/vendor/github.com/sethvargo/go-fastly/health_check.go @@ -8,7 +8,7 @@ import ( // HealthCheck represents a health check response from the Fastly API. type HealthCheck struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Method string `mapstructure:"method"` @@ -39,7 +39,7 @@ type ListHealthChecksInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListHealthChecks returns the list of health checks for the configuration @@ -49,11 +49,11 @@ func (c *Client) ListHealthChecks(i *ListHealthChecksInput) ([]*HealthCheck, err return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/healthcheck", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/healthcheck", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -72,7 +72,7 @@ type CreateHealthCheckInput struct { // Service is the ID of the service. 
Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` Method string `form:"method,omitempty"` @@ -93,11 +93,11 @@ func (c *Client) CreateHealthCheck(i *CreateHealthCheckInput) (*HealthCheck, err return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/healthcheck", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/healthcheck", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -115,7 +115,7 @@ type GetHealthCheckInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the health check to fetch. Name string @@ -127,7 +127,7 @@ func (c *Client) GetHealthCheck(i *GetHealthCheckInput) (*HealthCheck, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -135,7 +135,7 @@ func (c *Client) GetHealthCheck(i *GetHealthCheckInput) (*HealthCheck, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/healthcheck/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/healthcheck/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -153,7 +153,7 @@ type UpdateHealthCheckInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the health check to update. Name string @@ -177,7 +177,7 @@ func (c *Client) UpdateHealthCheck(i *UpdateHealthCheckInput) (*HealthCheck, err return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -185,7 +185,7 @@ func (c *Client) UpdateHealthCheck(i *UpdateHealthCheckInput) (*HealthCheck, err return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/healthcheck/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/healthcheck/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -203,7 +203,7 @@ type DeleteHealthCheckInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the health check to delete (required). 
Name string @@ -215,7 +215,7 @@ func (c *Client) DeleteHealthCheck(i *DeleteHealthCheckInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -223,7 +223,7 @@ func (c *Client) DeleteHealthCheck(i *DeleteHealthCheckInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/healthcheck/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/healthcheck/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/logentries.go b/vendor/github.com/sethvargo/go-fastly/logentries.go index 4296cede3..1815eee0a 100644 --- a/vendor/github.com/sethvargo/go-fastly/logentries.go +++ b/vendor/github.com/sethvargo/go-fastly/logentries.go @@ -9,7 +9,7 @@ import ( // Logentries represents a logentries response from the Fastly API. type Logentries struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Port uint `mapstructure:"port"` @@ -38,7 +38,7 @@ type ListLogentriesInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListLogentries returns the list of logentries for the configuration version. @@ -47,11 +47,11 @@ func (c *Client) ListLogentries(i *ListLogentriesInput) ([]*Logentries, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/logentries", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/logentries", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -70,7 +70,7 @@ type CreateLogentriesInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` Port uint `form:"port,omitempty"` @@ -86,11 +86,11 @@ func (c *Client) CreateLogentries(i *CreateLogentriesInput) (*Logentries, error) return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/logentries", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/logentries", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -108,7 +108,7 @@ type GetLogentriesInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the logentries to fetch. Name string @@ -120,7 +120,7 @@ func (c *Client) GetLogentries(i *GetLogentriesInput) (*Logentries, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -128,7 +128,7 @@ func (c *Client) GetLogentries(i *GetLogentriesInput) (*Logentries, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/logentries/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/logentries/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -146,7 +146,7 @@ type UpdateLogentriesInput struct { // Service is the ID of the service. 
Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the logentries to update. Name string @@ -165,7 +165,7 @@ func (c *Client) UpdateLogentries(i *UpdateLogentriesInput) (*Logentries, error) return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -173,7 +173,7 @@ func (c *Client) UpdateLogentries(i *UpdateLogentriesInput) (*Logentries, error) return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/logentries/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/logentries/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -191,7 +191,7 @@ type DeleteLogentriesInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the logentries to delete (required). Name string @@ -203,7 +203,7 @@ func (c *Client) DeleteLogentries(i *DeleteLogentriesInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -211,7 +211,7 @@ func (c *Client) DeleteLogentries(i *DeleteLogentriesInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/logentries/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/logentries/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/papertrail.go b/vendor/github.com/sethvargo/go-fastly/papertrail.go index 294c5b1c1..3b7d85e27 100644 --- a/vendor/github.com/sethvargo/go-fastly/papertrail.go +++ b/vendor/github.com/sethvargo/go-fastly/papertrail.go @@ -9,7 +9,7 @@ import ( // Papertrail represents a papertrail response from the Fastly API. type Papertrail struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Address string `mapstructure:"address"` @@ -37,7 +37,7 @@ type ListPapertrailsInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListPapertrails returns the list of papertrails for the configuration version. @@ -46,11 +46,11 @@ func (c *Client) ListPapertrails(i *ListPapertrailsInput) ([]*Papertrail, error) return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/papertrail", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/papertrail", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -69,7 +69,7 @@ type CreatePapertrailInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. 
Service string - Version string + Version int Name string `form:"name,omitempty"` Address string `form:"address,omitempty"` @@ -87,11 +87,11 @@ func (c *Client) CreatePapertrail(i *CreatePapertrailInput) (*Papertrail, error) return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/papertrail", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/papertrail", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -109,7 +109,7 @@ type GetPapertrailInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the papertrail to fetch. Name string @@ -121,7 +121,7 @@ func (c *Client) GetPapertrail(i *GetPapertrailInput) (*Papertrail, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -129,7 +129,7 @@ func (c *Client) GetPapertrail(i *GetPapertrailInput) (*Papertrail, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/papertrail/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/papertrail/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -147,7 +147,7 @@ type UpdatePapertrailInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the papertrail to update. Name string @@ -168,7 +168,7 @@ func (c *Client) UpdatePapertrail(i *UpdatePapertrailInput) (*Papertrail, error) return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -176,7 +176,7 @@ func (c *Client) UpdatePapertrail(i *UpdatePapertrailInput) (*Papertrail, error) return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/papertrail/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/papertrail/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -194,7 +194,7 @@ type DeletePapertrailInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the papertrail to delete (required). 
Name string @@ -206,7 +206,7 @@ func (c *Client) DeletePapertrail(i *DeletePapertrailInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -214,7 +214,7 @@ func (c *Client) DeletePapertrail(i *DeletePapertrailInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/papertrail/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/papertrail/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/request_setting.go b/vendor/github.com/sethvargo/go-fastly/request_setting.go index 41fb9836d..eef9c2d14 100644 --- a/vendor/github.com/sethvargo/go-fastly/request_setting.go +++ b/vendor/github.com/sethvargo/go-fastly/request_setting.go @@ -40,7 +40,7 @@ type RequestSettingXFF string // RequestSetting represents a request setting response from the Fastly API. type RequestSetting struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` ForceMiss bool `mapstructure:"force_miss"` @@ -73,7 +73,7 @@ type ListRequestSettingsInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListRequestSettings returns the list of request settings for the @@ -83,11 +83,11 @@ func (c *Client) ListRequestSettings(i *ListRequestSettingsInput) ([]*RequestSet return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/request_settings", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/request_settings", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -107,7 +107,7 @@ type CreateRequestSettingInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` ForceMiss *Compatibool `form:"force_miss,omitempty"` @@ -129,11 +129,11 @@ func (c *Client) CreateRequestSetting(i *CreateRequestSettingInput) (*RequestSet return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/request_settings", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/request_settings", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -151,7 +151,7 @@ type GetRequestSettingInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the request settings to fetch. 
Name string @@ -164,7 +164,7 @@ func (c *Client) GetRequestSetting(i *GetRequestSettingInput) (*RequestSetting, return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -172,7 +172,7 @@ func (c *Client) GetRequestSetting(i *GetRequestSettingInput) (*RequestSetting, return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/request_settings/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/request_settings/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -191,7 +191,7 @@ type UpdateRequestSettingInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the request settings to update. Name string @@ -216,7 +216,7 @@ func (c *Client) UpdateRequestSetting(i *UpdateRequestSettingInput) (*RequestSet return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -224,7 +224,7 @@ func (c *Client) UpdateRequestSetting(i *UpdateRequestSettingInput) (*RequestSet return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/request_settings/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/request_settings/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -242,7 +242,7 @@ type DeleteRequestSettingInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the request settings to delete (required). Name string @@ -254,7 +254,7 @@ func (c *Client) DeleteRequestSetting(i *DeleteRequestSettingInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -262,7 +262,7 @@ func (c *Client) DeleteRequestSetting(i *DeleteRequestSettingInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/request_settings/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/request_settings/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/response_object.go b/vendor/github.com/sethvargo/go-fastly/response_object.go index 1579d58c8..78bc581c5 100644 --- a/vendor/github.com/sethvargo/go-fastly/response_object.go +++ b/vendor/github.com/sethvargo/go-fastly/response_object.go @@ -8,7 +8,7 @@ import ( // ResponseObject represents a response object response from the Fastly API. type ResponseObject struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Status uint `mapstructure:"status"` @@ -36,7 +36,7 @@ type ListResponseObjectsInput struct { Service string // Version is the specific configuration version (required). 
- Version string + Version int } // ListResponseObjects returns the list of response objects for the @@ -46,11 +46,11 @@ func (c *Client) ListResponseObjects(i *ListResponseObjectsInput) ([]*ResponseOb return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/response_object", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/response_object", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -70,7 +70,7 @@ type CreateResponseObjectInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` Status uint `form:"status,omitempty"` @@ -87,11 +87,11 @@ func (c *Client) CreateResponseObject(i *CreateResponseObjectInput) (*ResponseOb return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/response_object", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/response_object", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -109,7 +109,7 @@ type GetResponseObjectInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the response object to fetch. Name string @@ -122,7 +122,7 @@ func (c *Client) GetResponseObject(i *GetResponseObjectInput) (*ResponseObject, return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -130,7 +130,7 @@ func (c *Client) GetResponseObject(i *GetResponseObjectInput) (*ResponseObject, return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/response_object/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/response_object/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -149,7 +149,7 @@ type UpdateResponseObjectInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the response object to update. Name string @@ -169,7 +169,7 @@ func (c *Client) UpdateResponseObject(i *UpdateResponseObjectInput) (*ResponseOb return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -177,7 +177,7 @@ func (c *Client) UpdateResponseObject(i *UpdateResponseObjectInput) (*ResponseOb return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/response_object/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/response_object/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -195,7 +195,7 @@ type DeleteResponseObjectInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the response object to delete (required). 
Name string @@ -207,7 +207,7 @@ func (c *Client) DeleteResponseObject(i *DeleteResponseObjectInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -215,7 +215,7 @@ func (c *Client) DeleteResponseObject(i *DeleteResponseObjectInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/response_object/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/response_object/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/s3.go b/vendor/github.com/sethvargo/go-fastly/s3.go index ed20bf9ad..89fda06c6 100644 --- a/vendor/github.com/sethvargo/go-fastly/s3.go +++ b/vendor/github.com/sethvargo/go-fastly/s3.go @@ -16,7 +16,7 @@ const ( // S3 represents a S3 response from the Fastly API. type S3 struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` BucketName string `mapstructure:"bucket_name"` @@ -52,7 +52,7 @@ type ListS3sInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListS3s returns the list of S3s for the configuration version. @@ -61,11 +61,11 @@ func (c *Client) ListS3s(i *ListS3sInput) ([]*S3, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/s3", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/s3", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -84,7 +84,7 @@ type CreateS3Input struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` BucketName string `form:"bucket_name,omitempty"` @@ -107,11 +107,11 @@ func (c *Client) CreateS3(i *CreateS3Input) (*S3, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/s3", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/s3", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -129,7 +129,7 @@ type GetS3Input struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the S3 to fetch. Name string @@ -141,7 +141,7 @@ func (c *Client) GetS3(i *GetS3Input) (*S3, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -149,7 +149,7 @@ func (c *Client) GetS3(i *GetS3Input) (*S3, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/s3/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/s3/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -167,7 +167,7 @@ type UpdateS3Input struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the S3 to update. 
Name string @@ -193,7 +193,7 @@ func (c *Client) UpdateS3(i *UpdateS3Input) (*S3, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -201,7 +201,7 @@ func (c *Client) UpdateS3(i *UpdateS3Input) (*S3, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/s3/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/s3/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -219,7 +219,7 @@ type DeleteS3Input struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the S3 to delete (required). Name string @@ -231,7 +231,7 @@ func (c *Client) DeleteS3(i *DeleteS3Input) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -239,7 +239,7 @@ func (c *Client) DeleteS3(i *DeleteS3Input) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/s3/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/s3/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/settings.go b/vendor/github.com/sethvargo/go-fastly/settings.go index 02ea6f234..8208acf15 100644 --- a/vendor/github.com/sethvargo/go-fastly/settings.go +++ b/vendor/github.com/sethvargo/go-fastly/settings.go @@ -5,7 +5,7 @@ import "fmt" // Settings represents a backend response from the Fastly API. type Settings struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` DefaultTTL uint `mapstructure:"general.default_ttl"` DefaultHost string `mapstructure:"general.default_host"` @@ -16,7 +16,7 @@ type GetSettingsInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int } // GetSettings gets the backend configuration with the given parameters. @@ -25,11 +25,11 @@ func (c *Client) GetSettings(i *GetSettingsInput) (*Settings, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/settings", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/settings", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -47,9 +47,9 @@ type UpdateSettingsInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. 
Service string - Version string + Version int - DefaultTTL uint `form:"general.default_ttl,omitempty"` + DefaultTTL uint `form:"general.default_ttl"` DefaultHost string `form:"general.default_host,omitempty"` } @@ -59,11 +59,11 @@ func (c *Client) UpdateSettings(i *UpdateSettingsInput) (*Settings, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/settings", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/settings", i.Service, i.Version) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err diff --git a/vendor/github.com/sethvargo/go-fastly/sumologic.go b/vendor/github.com/sethvargo/go-fastly/sumologic.go index 2e6b3fba7..c94f98de7 100644 --- a/vendor/github.com/sethvargo/go-fastly/sumologic.go +++ b/vendor/github.com/sethvargo/go-fastly/sumologic.go @@ -9,7 +9,7 @@ import ( // Sumologic represents a sumologic response from the Fastly API. type Sumologic struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Address string `mapstructure:"address"` @@ -39,7 +39,7 @@ type ListSumologicsInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListSumologics returns the list of sumologics for the configuration version. @@ -48,11 +48,11 @@ func (c *Client) ListSumologics(i *ListSumologicsInput) ([]*Sumologic, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/sumologic", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/sumologic", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -71,7 +71,7 @@ type CreateSumologicInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` Address string `form:"address,omitempty"` @@ -88,11 +88,11 @@ func (c *Client) CreateSumologic(i *CreateSumologicInput) (*Sumologic, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/sumologic", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/sumologic", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -110,7 +110,7 @@ type GetSumologicInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the sumologic to fetch. 
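The tag change on DefaultTTL above (dropping `omitempty`) is the substance of the Fastly `default_ttl = 0` fix recorded in the CHANGELOG entry later in this series ([GH-13648]): with `omitempty`, a zero TTL is skipped during form encoding, so an explicit 0 never reached the API. A hand-rolled sketch of the two behaviors (this is not the vendored form encoder, only its zero-value rule):

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// encodeTTL mimics how a form encoder treats a zero uint: with omitempty
// the field is dropped entirely; without it the field is always emitted.
func encodeTTL(ttl uint, omitempty bool) string {
	v := url.Values{}
	if !omitempty || ttl != 0 {
		v.Set("general.default_ttl", strconv.FormatUint(uint64(ttl), 10))
	}
	return v.Encode()
}

func main() {
	fmt.Printf("omitempty: %q\n", encodeTTL(0, true))  // omitempty: ""
	fmt.Printf("always:    %q\n", encodeTTL(0, false)) // always:    "general.default_ttl=0"
}
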
Name string @@ -122,7 +122,7 @@ func (c *Client) GetSumologic(i *GetSumologicInput) (*Sumologic, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -130,7 +130,7 @@ func (c *Client) GetSumologic(i *GetSumologicInput) (*Sumologic, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/sumologic/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/sumologic/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -148,7 +148,7 @@ type UpdateSumologicInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the sumologic to update. Name string @@ -168,7 +168,7 @@ func (c *Client) UpdateSumologic(i *UpdateSumologicInput) (*Sumologic, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -176,7 +176,7 @@ func (c *Client) UpdateSumologic(i *UpdateSumologicInput) (*Sumologic, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/sumologic/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/sumologic/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -194,7 +194,7 @@ type DeleteSumologicInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the sumologic to delete (required). Name string @@ -206,7 +206,7 @@ func (c *Client) DeleteSumologic(i *DeleteSumologicInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -214,7 +214,7 @@ func (c *Client) DeleteSumologic(i *DeleteSumologicInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/sumologic/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/sumologic/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/syslog.go b/vendor/github.com/sethvargo/go-fastly/syslog.go index 56d61ce31..dc6c548f9 100644 --- a/vendor/github.com/sethvargo/go-fastly/syslog.go +++ b/vendor/github.com/sethvargo/go-fastly/syslog.go @@ -9,7 +9,7 @@ import ( // Syslog represents a syslog response from the Fastly API. type Syslog struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Address string `mapstructure:"address"` @@ -41,7 +41,7 @@ type ListSyslogsInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListSyslogs returns the list of syslogs for the configuration version. 
@@ -50,11 +50,11 @@ func (c *Client) ListSyslogs(i *ListSyslogsInput) ([]*Syslog, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/syslog", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/syslog", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -73,7 +73,7 @@ type CreateSyslogInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Name string `form:"name,omitempty"` Address string `form:"address,omitempty"` @@ -92,11 +92,11 @@ func (c *Client) CreateSyslog(i *CreateSyslogInput) (*Syslog, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/logging/syslog", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/logging/syslog", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -114,7 +114,7 @@ type GetSyslogInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the syslog to fetch. Name string @@ -126,7 +126,7 @@ func (c *Client) GetSyslog(i *GetSyslogInput) (*Syslog, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -134,7 +134,7 @@ func (c *Client) GetSyslog(i *GetSyslogInput) (*Syslog, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/syslog/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/syslog/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -152,7 +152,7 @@ type UpdateSyslogInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the syslog to update. Name string @@ -174,7 +174,7 @@ func (c *Client) UpdateSyslog(i *UpdateSyslogInput) (*Syslog, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -182,7 +182,7 @@ func (c *Client) UpdateSyslog(i *UpdateSyslogInput) (*Syslog, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/syslog/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/syslog/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -200,7 +200,7 @@ type DeleteSyslogInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the syslog to delete (required). 
Name string @@ -212,7 +212,7 @@ func (c *Client) DeleteSyslog(i *DeleteSyslogInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -220,7 +220,7 @@ func (c *Client) DeleteSyslog(i *DeleteSyslogInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/logging/syslog/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/logging/syslog/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/vcl.go b/vendor/github.com/sethvargo/go-fastly/vcl.go index 63be5a652..dda137be4 100644 --- a/vendor/github.com/sethvargo/go-fastly/vcl.go +++ b/vendor/github.com/sethvargo/go-fastly/vcl.go @@ -8,7 +8,7 @@ import ( // VCL represents a response about VCL from the Fastly API. type VCL struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Main bool `mapstructure:"main"` @@ -31,7 +31,7 @@ type ListVCLsInput struct { Service string // Version is the specific configuration version (required). - Version string + Version int } // ListVCLs returns the list of VCLs for the configuration version. @@ -40,11 +40,11 @@ func (c *Client) ListVCLs(i *ListVCLsInput) ([]*VCL, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/vcl", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/vcl", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -63,7 +63,7 @@ type GetVCLInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the VCL to fetch. Name string @@ -75,7 +75,7 @@ func (c *Client) GetVCL(i *GetVCLInput) (*VCL, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -83,7 +83,7 @@ func (c *Client) GetVCL(i *GetVCLInput) (*VCL, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/vcl/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/vcl/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -101,7 +101,7 @@ type GetGeneratedVCLInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int } // GetGeneratedVCL gets the VCL configuration with the given parameters. @@ -110,11 +110,11 @@ func (c *Client) GetGeneratedVCL(i *GetGeneratedVCLInput) (*VCL, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/generated_vcl", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/generated_vcl", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -132,7 +132,7 @@ type CreateVCLInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. 
Service string - Version string + Version int Name string `form:"name,omitempty"` Content string `form:"content,omitempty"` @@ -144,11 +144,11 @@ func (c *Client) CreateVCL(i *CreateVCLInput) (*VCL, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/vcl", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/vcl", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -166,7 +166,7 @@ type UpdateVCLInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the VCL to update (required). Name string @@ -181,7 +181,7 @@ func (c *Client) UpdateVCL(i *UpdateVCLInput) (*VCL, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -189,7 +189,7 @@ func (c *Client) UpdateVCL(i *UpdateVCLInput) (*VCL, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/vcl/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/vcl/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -207,7 +207,7 @@ type ActivateVCLInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the VCL to mark as main (required). Name string @@ -219,7 +219,7 @@ func (c *Client) ActivateVCL(i *ActivateVCLInput) (*VCL, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -227,7 +227,7 @@ func (c *Client) ActivateVCL(i *ActivateVCLInput) (*VCL, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/vcl/%s/main", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/vcl/%s/main", i.Service, i.Version, i.Name) resp, err := c.Put(path, nil) if err != nil { return nil, err @@ -245,7 +245,7 @@ type DeleteVCLInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the VCL to delete (required). Name string @@ -257,7 +257,7 @@ func (c *Client) DeleteVCL(i *DeleteVCLInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -265,7 +265,7 @@ func (c *Client) DeleteVCL(i *DeleteVCLInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/vcl/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/vcl/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/github.com/sethvargo/go-fastly/version.go b/vendor/github.com/sethvargo/go-fastly/version.go index 8b54c9cee..f245eebfa 100644 --- a/vendor/github.com/sethvargo/go-fastly/version.go +++ b/vendor/github.com/sethvargo/go-fastly/version.go @@ -7,14 +7,14 @@ import ( // Version represents a distinct configuration version. 
type Version struct { - Number string `mapstructure:"number"` - Comment string `mapstructure:"comment"` - ServiceID string `mapstructure:"service_id"` - Active bool `mapstructure:"active"` - Locked bool `mapstructure:"locked"` - Deployed bool `mapstructure:"deployed"` - Staging bool `mapstructure:"staging"` - Testing bool `mapstructure:"testing"` + Number int `mapstructure:"number"` + Comment string `mapstructure:"comment"` + ServiceID string `mapstructure:"service_id"` + Active bool `mapstructure:"active"` + Locked bool `mapstructure:"locked"` + Deployed bool `mapstructure:"deployed"` + Staging bool `mapstructure:"staging"` + Testing bool `mapstructure:"testing"` } // versionsByNumber is a sortable list of versions. This is used by the version @@ -114,7 +114,7 @@ type GetVersionInput struct { Service string // Version is the version number to fetch (required). - Version string + Version int } // GetVersion fetches a version with the given information. @@ -123,11 +123,11 @@ func (c *Client) GetVersion(i *GetVersionInput) (*Version, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -145,7 +145,7 @@ type UpdateVersionInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int Comment string `form:"comment,omitempty"` } @@ -156,11 +156,11 @@ func (c *Client) UpdateVersion(i *UpdateVersionInput) (*Version, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d", i.Service, i.Version) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -178,7 +178,7 @@ type ActivateVersionInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int } // ActivateVersion activates the given version. @@ -187,11 +187,11 @@ func (c *Client) ActivateVersion(i *ActivateVersionInput) (*Version, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/activate", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/activate", i.Service, i.Version) resp, err := c.Put(path, nil) if err != nil { return nil, err @@ -209,7 +209,7 @@ type DeactivateVersionInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int } // DeactivateVersion deactivates the given version. @@ -218,11 +218,11 @@ func (c *Client) DeactivateVersion(i *DeactivateVersionInput) (*Version, error) return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/deactivate", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/deactivate", i.Service, i.Version) resp, err := c.Put(path, nil) if err != nil { return nil, err @@ -240,7 +240,7 @@ type CloneVersionInput struct { // Service is the ID of the service. 
Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int } // CloneVersion creates a clone of the version with and returns a new @@ -251,11 +251,11 @@ func (c *Client) CloneVersion(i *CloneVersionInput) (*Version, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/clone", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/clone", i.Service, i.Version) resp, err := c.Put(path, nil) if err != nil { return nil, err @@ -273,7 +273,7 @@ type ValidateVersionInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int } // ValidateVersion validates if the given version is okay. @@ -284,11 +284,11 @@ func (c *Client) ValidateVersion(i *ValidateVersionInput) (bool, string, error) return false, msg, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return false, msg, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/validate", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/validate", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return false, msg, err @@ -308,7 +308,7 @@ type LockVersionInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int } // LockVersion locks the specified version. @@ -317,11 +317,11 @@ func (c *Client) LockVersion(i *LockVersionInput) (*Version, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/lock", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/lock", i.Service, i.Version) resp, err := c.Put(path, nil) if err != nil { return nil, err diff --git a/vendor/github.com/sethvargo/go-fastly/wordpress.go b/vendor/github.com/sethvargo/go-fastly/wordpress.go index 661d61348..c1e0a4efb 100644 --- a/vendor/github.com/sethvargo/go-fastly/wordpress.go +++ b/vendor/github.com/sethvargo/go-fastly/wordpress.go @@ -8,7 +8,7 @@ import ( // Wordpress represents a wordpress response from the Fastly API. type Wordpress struct { ServiceID string `mapstructure:"service_id"` - Version string `mapstructure:"version"` + Version int `mapstructure:"version"` Name string `mapstructure:"name"` Path string `mapstructure:"path"` @@ -30,7 +30,7 @@ type ListWordpressesInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int } // ListWordpresses returns the list of wordpresses for the configuration version. @@ -39,11 +39,11 @@ func (c *Client) ListWordpresses(i *ListWordpressesInput) ([]*Wordpress, error) return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/wordpress", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/wordpress", i.Service, i.Version) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -62,7 +62,7 @@ type CreateWordpressInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. 
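A likely side benefit of `Number int`, relevant to the `versionsByNumber` sortable list mentioned above: string version numbers compare lexically, so "10" lands before "9". The old comparator isn't shown in this hunk, so take the following as motivation rather than a repro of a concrete bug:

package main

import (
	"fmt"
	"sort"
)

func main() {
	asStrings := []string{"9", "10", "2"}
	sort.Strings(asStrings)
	fmt.Println(asStrings) // [10 2 9] -- lexical order, not numeric

	asInts := []int{9, 10, 2}
	sort.Ints(asInts)
	fmt.Println(asInts) // [2 9 10]
}
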
Service string - Version string + Version int Name string `form:"name,omitempty"` Path string `form:"path,omitempty"` @@ -75,11 +75,11 @@ func (c *Client) CreateWordpress(i *CreateWordpressInput) (*Wordpress, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } - path := fmt.Sprintf("/service/%s/version/%s/wordpress", i.Service, i.Version) + path := fmt.Sprintf("/service/%s/version/%d/wordpress", i.Service, i.Version) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err @@ -97,7 +97,7 @@ type GetWordpressInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the wordpress to fetch. Name string @@ -109,7 +109,7 @@ func (c *Client) GetWordpress(i *GetWordpressInput) (*Wordpress, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -117,7 +117,7 @@ func (c *Client) GetWordpress(i *GetWordpressInput) (*Wordpress, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/wordpress/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/wordpress/%s", i.Service, i.Version, i.Name) resp, err := c.Get(path, nil) if err != nil { return nil, err @@ -135,7 +135,7 @@ type UpdateWordpressInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the wordpress to update. Name string @@ -151,7 +151,7 @@ func (c *Client) UpdateWordpress(i *UpdateWordpressInput) (*Wordpress, error) { return nil, ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return nil, ErrMissingVersion } @@ -159,7 +159,7 @@ func (c *Client) UpdateWordpress(i *UpdateWordpressInput) (*Wordpress, error) { return nil, ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/wordpress/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/wordpress/%s", i.Service, i.Version, i.Name) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err @@ -177,7 +177,7 @@ type DeleteWordpressInput struct { // Service is the ID of the service. Version is the specific configuration // version. Both fields are required. Service string - Version string + Version int // Name is the name of the wordpress to delete (required). 
Name string @@ -189,7 +189,7 @@ func (c *Client) DeleteWordpress(i *DeleteWordpressInput) error { return ErrMissingService } - if i.Version == "" { + if i.Version == 0 { return ErrMissingVersion } @@ -197,7 +197,7 @@ func (c *Client) DeleteWordpress(i *DeleteWordpressInput) error { return ErrMissingName } - path := fmt.Sprintf("/service/%s/version/%s/wordpress/%s", i.Service, i.Version, i.Name) + path := fmt.Sprintf("/service/%s/version/%d/wordpress/%s", i.Service, i.Version, i.Name) resp, err := c.Delete(path, nil) if err != nil { return err diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index b13ab9f07..052ecfcd1 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -120,7 +120,6 @@ func (p *parser) parse() *node { default: panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) } - panic("unreachable") } func (p *parser) node(kind int) *node { diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go index 2befd553e..6ecdcb3c7 100644 --- a/vendor/gopkg.in/yaml.v2/emitterc.go +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -666,7 +666,6 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, return yaml_emitter_set_emitter_error(emitter, "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") } - return false } // Expect ALIAS. diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go index 0a7037ad1..81d05dfe5 100644 --- a/vendor/gopkg.in/yaml.v2/parserc.go +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -166,7 +166,6 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool default: panic("invalid parser state") } - return false } // Parse the production: diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index 93a863274..232313cc0 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -3,6 +3,7 @@ package yaml import ( "encoding/base64" "math" + "regexp" "strconv" "strings" "unicode/utf8" @@ -80,6 +81,8 @@ func resolvableTag(tag string) bool { return false } +var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) + func resolve(tag string, in string) (rtag string, out interface{}) { if !resolvableTag(tag) { return tag, in @@ -135,9 +138,11 @@ func resolve(tag string, in string) (rtag string, out interface{}) { if err == nil { return yaml_INT_TAG, uintv } - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } } if strings.HasPrefix(plain, "0b") { intv, err := strconv.ParseInt(plain[2:], 2, 64) diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index 25808000f..2c9d5111f 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -9,7 +9,7 @@ import ( // ************ // // The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in // some cases we are less restrictive that it requires. 
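On the resolve.go hunk above: strconv.ParseFloat accepts forms the YAML spec does not treat as plain floats — notably "Inf" and "NaN" — so an untagged scalar like NaN would silently resolve to a float. Gating on a YAML-shaped regexp first keeps such scalars as strings. A small check against the new pattern (regexp copied verbatim from the diff):

package main

import (
	"fmt"
	"regexp"
)

// Copied from the resolve.go hunk above. Note the exponent requires an
// explicit sign, so "1e6" is rejected while "1e+6" matches -- arguably
// stricter than the YAML spec, but on the safe side for resolution.
var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)

func main() {
	for _, s := range []string{"685.23", "-1e+6", "Inf", "NaN", "1e6"} {
		fmt.Printf("%-6s -> %v\n", s, yamlStyleFloat.MatchString(s))
	}
	// 685.23 -> true, -1e+6 -> true, Inf -> false, NaN -> false, 1e6 -> false
}
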
// // The process of transforming a YAML stream into a sequence of events is diff --git a/vendor/vendor.json b/vendor/vendor.json index 205e2eb02..b4fd8b29d 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -440,10 +440,10 @@ "revisionTime": "2015-08-30T18:26:16Z" }, { - "checksumSHA1": "csR8njyJfkweB0RCtfnLwgXNeqQ=", + "checksumSHA1": "kMfAFLobZymMrCOm/Xi/g9gnJOU=", "path": "github.com/ajg/form", - "revision": "7ff89c75808766205bfa4411abb436c98c33eb5e", - "revisionTime": "2016-06-29T21:43:12Z" + "revision": "523a5da1a92f01b01f840b61689c0340a0243532", + "revisionTime": "2016-08-22T23:00:20Z" }, { "checksumSHA1": "kn+zdUr5TNsoAX8BgjOaWYtMT5U=", @@ -2872,10 +2872,10 @@ "revisionTime": "2017-03-13T16:33:22Z" }, { - "checksumSHA1": "bCpL8ZdY+y7OGwiN3hZzbQI5oM0=", + "checksumSHA1": "Je4BagkuoS2e+IlNJOf0+6JoB1g=", "path": "github.com/sethvargo/go-fastly", - "revision": "43b7f97296d6c8e3a7bc083ab91101fbbc8c2f94", - "revisionTime": "2017-02-28T16:12:19Z" + "revision": "6e883a4c1524445805f33ddb1ab2a09a49ce4b1c", + "revisionTime": "2017-04-17T16:09:14Z" }, { "checksumSHA1": "8tEiK6vhVXuUbnWME5XNWLgvtSo=", @@ -3509,10 +3509,10 @@ "revisionTime": "2016-11-01T17:03:53Z" }, { - "checksumSHA1": "12GqsW8PiRPnezDDy0v4brZrndM=", + "checksumSHA1": "fALlQNY1fM99NesfLJ50KguWsio=", "path": "gopkg.in/yaml.v2", - "revision": "a5b47d31c556af34a302ce5d659e6fea44d90de0", - "revisionTime": "2016-09-28T15:37:09Z" + "revision": "cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b", + "revisionTime": "2017-04-07T17:21:22Z" }, { "checksumSHA1": "OcJdNALtPXoFOieAZjznhm7ufuU=", From 4a782583b6de41d3b9853c545a0ce04753a444bd Mon Sep 17 00:00:00 2001 From: Clint Date: Tue, 18 Apr 2017 13:31:45 -0500 Subject: [PATCH 204/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e036f20d..a76fa1417 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ BUG FIXES: * provider/aws: Allow GovCloud KMS ARNs to pass validation in `kms_key_id` attributes [GH-13699] * provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650] * provider/azurerm: Locking around Network Security Group / Subnets [GH-13637] + * provider/fastly: Fix issue with using 0 for `default_ttl` [GH-13648] * provider/google: Stop setting the id when project creation fails [GH-13644] * provider/newrelic: newrelic_alert_condition - `condition_scope` must be `application` or `instance` [GH-12972] * provider/openstack: Fix updating Ports [GH-13604] From f712880fee355a69567b40cda2486c198da15aec Mon Sep 17 00:00:00 2001 From: Jay Wang Date: Fri, 14 Apr 2017 15:19:22 -0700 Subject: [PATCH 205/342] Lock Route Table / Subnets --- .../providers/azurerm/resource_arm_subnet.go | 19 +++++++++++++++++ .../azurerm/resource_arm_subnet_test.go | 21 +++++++++++++++++-- builtin/providers/azurerm/resourceid.go | 9 ++++++++ 3 files changed, 47 insertions(+), 2 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_subnet.go b/builtin/providers/azurerm/resource_arm_subnet.go index 65df4f447..f91dd85b4 100644 --- a/builtin/providers/azurerm/resource_arm_subnet.go +++ b/builtin/providers/azurerm/resource_arm_subnet.go @@ -104,6 +104,14 @@ func resourceArmSubnetCreate(d *schema.ResourceData, meta interface{}) error { properties.RouteTable = &network.RouteTable{ ID: &rtId, } + + routeTableName, err := parseRouteTableName(rtId) + if err != nil { + return err + } + + armMutexKV.Lock(routeTableName) + defer armMutexKV.Unlock(routeTableName) } subnet := network.Subnet{ @@ -201,6 +209,17 @@ func 
resourceArmSubnetDelete(d *schema.ResourceData, meta interface{}) error { defer armMutexKV.Unlock(networkSecurityGroupName) } + if v, ok := d.GetOk("route_table_id"); ok { + rtId := v.(string) + routeTableName, err := parseRouteTableName(rtId) + if err != nil { + return err + } + + armMutexKV.Lock(routeTableName) + defer armMutexKV.Unlock(routeTableName) + } + armMutexKV.Lock(vnetName) defer armMutexKV.Unlock(vnetName) diff --git a/builtin/providers/azurerm/resource_arm_subnet_test.go b/builtin/providers/azurerm/resource_arm_subnet_test.go index 5f1f2bcbe..264b0a540 100644 --- a/builtin/providers/azurerm/resource_arm_subnet_test.go +++ b/builtin/providers/azurerm/resource_arm_subnet_test.go @@ -13,7 +13,7 @@ import ( func TestAccAzureRMSubnet_basic(t *testing.T) { ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMSubnet_basic, ri, ri, ri) + config := fmt.Sprintf(testAccAzureRMSubnet_basic, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -33,7 +33,7 @@ func TestAccAzureRMSubnet_basic(t *testing.T) { func TestAccAzureRMSubnet_disappears(t *testing.T) { ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMSubnet_basic, ri, ri, ri) + config := fmt.Sprintf(testAccAzureRMSubnet_basic, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -152,5 +152,22 @@ resource "azurerm_subnet" "test" { resource_group_name = "${azurerm_resource_group.test.name}" virtual_network_name = "${azurerm_virtual_network.test.name}" address_prefix = "10.0.2.0/24" + route_table_id = "${azurerm_route_table.test.id}" +} + +resource "azurerm_route_table" "test" { + name = "acctestroutetable%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "West US" +} + +resource "azurerm_route" "test" { + name = "acctestroute%d" + resource_group_name = "${azurerm_resource_group.test.name}" + route_table_name = "${azurerm_route_table.test.name}" + + address_prefix = "10.100.0.0/14" + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = "10.10.1.1" } ` diff --git a/builtin/providers/azurerm/resourceid.go b/builtin/providers/azurerm/resourceid.go index f981b410b..281bd8f9b 100644 --- a/builtin/providers/azurerm/resourceid.go +++ b/builtin/providers/azurerm/resourceid.go @@ -104,3 +104,12 @@ func parseNetworkSecurityGroupName(networkSecurityGroupId string) (string, error return id.Path["networkSecurityGroups"], nil } + +func parseRouteTableName(routeTableId string) (string, error) { + id, err := parseAzureResourceID(routeTableId) + if err != nil { + return "", fmt.Errorf("[ERROR] Unable to parse Route Table ID '%s': %+v", routeTableId, err) + } + + return id.Path["routeTables"], nil +} From 1320347b7b3dbe9bc9da5d0802b8a29a1301b1d4 Mon Sep 17 00:00:00 2001 From: Stephen Weatherford Date: Mon, 17 Apr 2017 23:05:20 +0000 Subject: [PATCH 206/342] Fix another issue from #11625 that's causing null entries in ssh_keys --- builtin/providers/azurerm/resource_arm_virtual_machine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine.go b/builtin/providers/azurerm/resource_arm_virtual_machine.go index e3fe29808..c3f736412 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine.go @@ -1033,7 +1033,7 @@ func flattenAzureRmVirtualMachineOsProfileLinuxConfiguration(config *compute.Lin result["disable_password_authentication"] = 
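Stepping back to patch 205's locking in the subnet create and delete paths above: armMutexKV is a keyed mutex (one lock per route table name), so concurrent subnet operations serialize only when they touch the same route table. The real implementation lives in Terraform's helper/mutexkv package; the following is a simplified stand-in that just shows the shape:

package main

import (
	"fmt"
	"sync"
)

// mutexKV hands out one mutex per string key, created lazily, so callers
// contend only when they share a key (here: a route table name).
type mutexKV struct {
	mu    sync.Mutex
	store map[string]*sync.Mutex
}

func newMutexKV() *mutexKV {
	return &mutexKV{store: make(map[string]*sync.Mutex)}
}

func (m *mutexKV) get(key string) *sync.Mutex {
	m.mu.Lock()
	defer m.mu.Unlock()
	if _, ok := m.store[key]; !ok {
		m.store[key] = &sync.Mutex{}
	}
	return m.store[key]
}

func (m *mutexKV) Lock(key string)   { m.get(key).Lock() }
func (m *mutexKV) Unlock(key string) { m.get(key).Unlock() }

func main() {
	kv := newMutexKV()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			kv.Lock("routetable-1") // same key -> these goroutines serialize
			defer kv.Unlock("routetable-1")
			fmt.Println("subnet op", n)
		}(i)
	}
	wg.Wait()
}
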
*config.DisablePasswordAuthentication if config.SSH != nil && len(*config.SSH.PublicKeys) > 0 { - ssh_keys := make([]map[string]interface{}, len(*config.SSH.PublicKeys)) + ssh_keys := make([]map[string]interface{}, 0, len(*config.SSH.PublicKeys)) for _, i := range *config.SSH.PublicKeys { key := make(map[string]interface{}) key["path"] = *i.Path From c42b2381c6d7add222362964da187a9f78a65c90 Mon Sep 17 00:00:00 2001 From: Jay Wang Date: Tue, 18 Apr 2017 17:17:14 -0700 Subject: [PATCH 207/342] Added check for empty strings in resource id parsing logic. --- builtin/providers/azurerm/resourceid.go | 5 +++++ builtin/providers/azurerm/resourceid_test.go | 12 ++++++++++++ 2 files changed, 17 insertions(+) diff --git a/builtin/providers/azurerm/resourceid.go b/builtin/providers/azurerm/resourceid.go index 281bd8f9b..4f89945e7 100644 --- a/builtin/providers/azurerm/resourceid.go +++ b/builtin/providers/azurerm/resourceid.go @@ -53,6 +53,11 @@ func parseAzureResourceID(id string) (*ResourceID, error) { key := components[current] value := components[current+1] + // Check key/value for empty strings. + if key == "" || value == "" { + return nil, fmt.Errorf("Key/Value cannot be empty strings. Key: '%s', Value: '%s'", key, value) + } + // Catch the subscriptionID before it can be overwritten by another "subscriptions" // value in the ID which is the case for the Service Bus subscription resource if key == "subscriptions" && subscriptionID == "" { diff --git a/builtin/providers/azurerm/resourceid_test.go b/builtin/providers/azurerm/resourceid_test.go index dff6ed800..4359b70d1 100644 --- a/builtin/providers/azurerm/resourceid_test.go +++ b/builtin/providers/azurerm/resourceid_test.go @@ -11,6 +11,18 @@ func TestParseAzureResourceID(t *testing.T) { expectedResourceID *ResourceID expectError bool }{ + { + // Missing "resourceGroups". + "/subscriptions/00000000-0000-0000-0000-000000000000//myResourceGroup/", + nil, + true, + }, + { + // Empty resource group ID. + "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//", + nil, + true, + }, { "random", nil, From 6c13f367edf2277036dfcf1c9820a58590a5c165 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Wed, 19 Apr 2017 09:45:09 -0400 Subject: [PATCH 208/342] core: Bump AutoRest Dep Bumps autorest dependency for Azure providers. Fixes: #11131 --- .../Azure/go-autorest/autorest/autorest.go | 1 + .../Azure/go-autorest/autorest/azure/async.go | 5 +- .../autorest/azure/environments.go | 162 ++++++++++-------- .../Azure/go-autorest/autorest/azure/token.go | 6 +- .../Azure/go-autorest/autorest/client.go | 41 ++++- .../Azure/go-autorest/autorest/error.go | 3 + .../Azure/go-autorest/autorest/preparer.go | 10 ++ .../Azure/go-autorest/autorest/responder.go | 34 +++- .../Azure/go-autorest/autorest/sender.go | 7 +- .../Azure/go-autorest/autorest/version.go | 23 ++- vendor/vendor.json | 44 ++--- 11 files changed, 218 insertions(+), 118 deletions(-) diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go index 9804f401e..51f1c4bbc 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -16,6 +16,7 @@ and Responding. A typical pattern is: DoRetryForAttempts(5, time.Second)) err = Respond(resp, + ByDiscardingBody(), ByClosing()) Each phase relies on decorators to modify and / or manage processing. 
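The package comment above describes autorest's three-phase pipeline, Preparing, Sending, and Responding, each built from chains of decorators. As a minimal, self-contained Go sketch of that composition style (the Responder, ResponderFunc, and byClosing names mirror autorest's shapes, but everything below is illustrative rather than the vendored implementation):

package main

import (
    "fmt"
    "net/http"
)

// Responder mirrors autorest's Responder: anything that can process an
// *http.Response.
type Responder interface {
    Respond(*http.Response) error
}

// ResponderFunc adapts a plain function to the Responder interface.
type ResponderFunc func(*http.Response) error

func (rf ResponderFunc) Respond(r *http.Response) error { return rf(r) }

// RespondDecorator wraps a Responder with extra behavior; chains of these
// are what a call like Respond(resp, ByDiscardingBody(), ByClosing()) builds up.
type RespondDecorator func(Responder) Responder

// byClosing runs the inner Responder first and then closes the response
// body, matching the ordering rule the package comment describes.
func byClosing() RespondDecorator {
    return func(inner Responder) Responder {
        return ResponderFunc(func(resp *http.Response) error {
            err := inner.Respond(resp)
            if resp != nil && resp.Body != nil {
                resp.Body.Close()
            }
            return err
        })
    }
}

func main() {
    base := ResponderFunc(func(*http.Response) error {
        fmt.Println("base responder ran")
        return nil
    })
    decorated := byClosing()(base)
    _ = decorated.Respond(&http.Response{})
}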
Decorators may first modify diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index 280d32a61..6e076981f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -3,12 +3,13 @@ package azure import ( "bytes" "fmt" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" "io/ioutil" "net/http" "strings" "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" ) const ( diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index ebf754eab..4701b4376 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -19,93 +19,108 @@ var environments = map[string]Environment{ // Environment represents a set of endpoints for each of Azure's Clouds. type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` } var ( // PublicCloud is the default public Azure cloud environment PublicCloud = Environment{ - Name: "AzurePublicCloud", - ManagementPortalURL: "https://manage.windowsazure.com/", - PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.windows.net/", - ResourceManagerEndpoint: "https://management.azure.com/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net/", - GraphEndpoint: "https://graph.windows.net/", - StorageEndpointSuffix: 
"core.windows.net", - SQLDatabaseDNSSuffix: "database.windows.net", - TrafficManagerDNSSuffix: "trafficmanager.net", - KeyVaultDNSSuffix: "vault.azure.net", - ServiceBusEndpointSuffix: "servicebus.azure.com", + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.azure.com", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", } // USGovernmentCloud is the cloud environment for the US Government USGovernmentCloud = Environment{ - Name: "AzureUSGovernmentCloud", - ManagementPortalURL: "https://manage.windowsazure.us/", - PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", - ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.usgovcloudapi.net/", - KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - GraphEndpoint: "https://graph.usgovcloudapi.net/", - StorageEndpointSuffix: "core.usgovcloudapi.net", - SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", - TrafficManagerDNSSuffix: "usgovtrafficmanager.net", - KeyVaultDNSSuffix: "vault.usgovcloudapi.net", - ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ContainerRegistryDNSSuffix: "azurecr.io", } // ChinaCloud is the cloud environment operated in China ChinaCloud = Environment{ - Name: "AzureChinaCloud", - ManagementPortalURL: "https://manage.chinacloudapi.com/", - PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", - ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", - ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/?api-version=1.0", - GalleryEndpoint: "https://gallery.chinacloudapi.cn/", - KeyVaultEndpoint: "https://vault.azure.cn/", - 
GraphEndpoint: "https://graph.chinacloudapi.cn/", - StorageEndpointSuffix: "core.chinacloudapi.cn", - SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", - TrafficManagerDNSSuffix: "trafficmanager.cn", - KeyVaultDNSSuffix: "vault.azure.cn", - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.io", } // GermanCloud is the cloud environment operated in Germany GermanCloud = Environment{ - Name: "AzureGermanCloud", - ManagementPortalURL: "http://portal.microsoftazure.de/", - PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.cloudapi.de/", - ResourceManagerEndpoint: "https://management.microsoftazure.de/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", - GalleryEndpoint: "https://gallery.cloudapi.de/", - KeyVaultEndpoint: "https://vault.microsoftazure.de/", - GraphEndpoint: "https://graph.cloudapi.de/", - StorageEndpointSuffix: "core.cloudapi.de", - SQLDatabaseDNSSuffix: "database.cloudapi.de", - TrafficManagerDNSSuffix: "azuretrafficmanager.de", - KeyVaultDNSSuffix: "vault.microsoftazure.de", - ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: "azurecr.io", } ) @@ -121,8 +136,13 @@ func EnvironmentFromName(name string) (Environment, error) { // OAuthConfigForTenant returns an OAuthConfig with tenant specific urls func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) { + return OAuthConfigForTenant(env.ActiveDirectoryEndpoint, tenantID) +} + +// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls for target cloud auth endpoint +func OAuthConfigForTenant(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { template := 
"%s/oauth2/%s?api-version=%s" - u, err := url.Parse(env.ActiveDirectoryEndpoint) + u, err := url.Parse(activeDirectoryEndpoint) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/token.go index db9a8fa02..cfcd03011 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/token.go @@ -91,7 +91,7 @@ type ServicePrincipalNoSecret struct { // SetAuthenticationValues is a method of the interface ServicePrincipalSecret // It only returns an error for the ServicePrincipalNoSecret type func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token.") + return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") } // ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form @@ -138,7 +138,7 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo token := jwt.New(jwt.SigningMethodRS256) token.Header["x5t"] = thumbprint token.Claims = jwt.MapClaims{ - "aud": spt.oauthConfig.TokenEndpoint, + "aud": spt.oauthConfig.TokenEndpoint.String(), "iss": spt.clientID, "sub": spt.clientID, "jti": base64.URLEncoding.EncodeToString(jti), @@ -302,7 +302,7 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error { var newToken Token err = autorest.Respond(resp, - autorest.WithErrorUnlessOK(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&newToken), autorest.ByClosing()) if err != nil { diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index b55b3d103..b5f94b5c3 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -8,6 +8,7 @@ import ( "log" "net/http" "net/http/cookiejar" + "runtime" "time" ) @@ -22,13 +23,24 @@ const ( DefaultRetryAttempts = 3 ) -var statusCodesForRetry = []int{ - http.StatusRequestTimeout, // 408 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 -} +var ( + // defaultUserAgent builds a string containing the Go version, system archityecture and OS, + // and the go-autorest version. + defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + Version(), + ) + + statusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) const ( requestFormat = `HTTP Request Begin =================================================== @@ -140,13 +152,24 @@ type Client struct { // NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed // string. 
func NewClientWithUserAgent(ua string) Client { - return Client{ + c := Client{ PollingDelay: DefaultPollingDelay, PollingDuration: DefaultPollingDuration, RetryAttempts: DefaultRetryAttempts, RetryDuration: 30 * time.Second, - UserAgent: ua, + UserAgent: defaultUserAgent, } + c.AddToUserAgent(ua) + return c +} + +// AddToUserAgent adds an extension to the current user agent +func (c *Client) AddToUserAgent(extension string) error { + if extension != "" { + c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) } // Do implements the Sender interface by invoking the active Sender after applying authorization. diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go index 2e4fc79c1..4bcb8f27b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -28,6 +28,9 @@ type DetailedError struct { // Message is the error message. Message string + + // Service Error is the response body of failed API in bytes + ServiceError []byte } // NewError creates a new Error conforming object from the passed packageType, method, and diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go index 5b2c52704..c9deb261a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -183,6 +183,16 @@ func WithBaseURL(baseURL string) PrepareDecorator { } } +// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the +// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. +func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(urlParameters) + for key, value := range parameters { + baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) + } + return WithBaseURL(baseURL) +} + // WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the // http.Request body. func WithFormData(v url.Values) PrepareDecorator { diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go index e377ad48a..87f71e585 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -5,6 +5,7 @@ import ( "encoding/json" "encoding/xml" "fmt" + "io" "io/ioutil" "net/http" "strings" @@ -87,6 +88,24 @@ func ByCopying(b *bytes.Buffer) RespondDecorator { } } +// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which +// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed +// Responder is invoked prior to discarding the response body, the decorator may occur anywhere +// within the set. 
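The ByDiscardingBody decorator documented here exists because Go's HTTP transport only returns a keep-alive connection to its pool once the response body has been read to EOF; unread bytes force a new connection for the next request. A hedged standalone sketch of the underlying idiom (the URL below is a placeholder):

package main

import (
    "io"
    "io/ioutil"
    "net/http"
)

// drainAndClose reads any unread bytes and closes the body so the
// underlying connection can go back into net/http's keep-alive pool;
// this is the idiom ByDiscardingBody packages up as a decorator.
func drainAndClose(resp *http.Response) error {
    if resp == nil || resp.Body == nil {
        return nil
    }
    defer resp.Body.Close()
    _, err := io.Copy(ioutil.Discard, resp.Body)
    return err
}

func main() {
    resp, err := http.Get("https://example.com/") // placeholder URL
    if err != nil {
        return
    }
    _ = drainAndClose(resp)
}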
+func ByDiscardingBody() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + return fmt.Errorf("Error discarding the response body: %v", err) + } + } + return err + }) + } +} + // ByClosing returns a RespondDecorator that first invokes the passed Responder after which it // closes the response body. Since the passed Responder is invoked prior to closing the response // body, the decorator may occur anywhere within the set. @@ -128,6 +147,8 @@ func ByUnmarshallingJSON(v interface{}) RespondDecorator { err := r.Respond(resp) if err == nil { b, errInner := ioutil.ReadAll(resp.Body) + // Some responses might include a BOM, remove for successful unmarshalling + b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) if errInner != nil { err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) } else if len(strings.Trim(string(b), " ")) > 0 { @@ -165,17 +186,24 @@ func ByUnmarshallingXML(v interface{}) RespondDecorator { } // WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. +// StatusCode is among the set passed. On error, response body is fully read into a buffer and +// presented in the returned error, as well as in the response body. func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { return func(r Responder) Responder { return ResponderFunc(func(resp *http.Response) error { err := r.Respond(resp) if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", + derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", resp.Request.Method, resp.Request.URL, resp.Status) + if resp.Body != nil { + defer resp.Body.Close() + b, _ := ioutil.ReadAll(resp.Body) + derr.ServiceError = b + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + err = derr } return err }) diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go index 93e6489e9..9c0697815 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -73,7 +73,7 @@ func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*ht func AfterDelay(d time.Duration) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (*http.Response, error) { - if !DelayForBackoff(d, 1, r.Cancel) { + if !DelayForBackoff(d, 0, r.Cancel) { return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") } return s.Do(r) @@ -97,7 +97,7 @@ func DoCloseIfError() SendDecorator { return SenderFunc(func(r *http.Request) (*http.Response, error) { resp, err := s.Do(r) if err != nil { - Respond(resp, ByClosing()) + Respond(resp, ByDiscardingBody(), ByClosing()) } return resp, err }) @@ -156,6 +156,7 @@ func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ... for err == nil && ResponseHasStatusCode(resp, codes...) 
{ Respond(resp, + ByDiscardingBody(), ByClosing()) resp, err = SendWithSender(s, r, AfterDelay(GetRetryAfter(resp, delay))) @@ -257,6 +258,8 @@ func WithLogging(logger *log.Logger) SendDecorator { // passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set // to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, // returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { select { case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index 8031a332c..e325f4ce2 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -2,17 +2,28 @@ package autorest import ( "fmt" + "strings" + "sync" ) const ( - major = "7" - minor = "0" - patch = "0" - tag = "" - semVerFormat = "%s.%s.%s%s" + major = 7 + minor = 3 + patch = 1 + tag = "" ) +var versionLock sync.Once +var version string + // Version returns the semantic version (see http://semver.org). func Version() string { - return fmt.Sprintf(semVerFormat, major, minor, patch, tag) + versionLock.Do(func() { + version = fmt.Sprintf("v%d.%d.%d", major, minor, patch) + + if trimmed := strings.TrimPrefix(tag, "-"); trimmed != "" { + version = fmt.Sprintf("%s-%s", version, trimmed) + } + }) + return version } diff --git a/vendor/vendor.json b/vendor/vendor.json index b4fd8b29d..ebaf9a352 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -276,48 +276,48 @@ "revisionTime": "2016-06-22T17:32:16Z" }, { - "checksumSHA1": "eVSHe6GIHj9/ziFrQLZ1SC7Nn6k=", + "checksumSHA1": "U2+FgaMOPEFg/yHLD5RbiXI1cq4=", "comment": "v7.0.5", "path": "github.com/Azure/go-autorest/autorest", - "revision": "0781901f19f1e7db3034d97ec57af753db0bf808", - "revisionTime": "2016-10-03T18:39:13Z", - "version": "v7.2.1", - "versionExact": "v7.2.1" + "revision": "a2fdd780c9a50455cecd249b00bdc3eb73a78e31", + "revisionTime": "2017-04-06T20:28:05Z", + "version": "v7.3.1", + "versionExact": "v7.3.1" }, { - "checksumSHA1": "z8FwqeLK0Pluo7FYC5k2MVBoils=", + "checksumSHA1": "ghrnc4vZv6q8zzeakZnrS8CGFhE=", "comment": "v7.0.5", "path": "github.com/Azure/go-autorest/autorest/azure", - "revision": "0781901f19f1e7db3034d97ec57af753db0bf808", - "revisionTime": "2016-10-03T18:39:13Z", - "version": "v7.2.1", - "versionExact": "v7.2.1" + "revision": "a2fdd780c9a50455cecd249b00bdc3eb73a78e31", + "revisionTime": "2017-04-06T20:28:05Z", + "version": "v7.3.1", + "versionExact": "v7.3.1" }, { "checksumSHA1": "q9Qz8PAxK5FTOZwgYKe5Lj38u4c=", "comment": "v7.0.5", "path": "github.com/Azure/go-autorest/autorest/date", - "revision": "0781901f19f1e7db3034d97ec57af753db0bf808", - "revisionTime": "2016-10-03T18:39:13Z", - "version": "v7.2.1", - "versionExact": "v7.2.1" + "revision": "a2fdd780c9a50455cecd249b00bdc3eb73a78e31", + "revisionTime": "2017-04-06T20:28:05Z", + "version": "v7.3.1", + "versionExact": "v7.3.1" }, { "checksumSHA1": "Ev8qCsbFjDlMlX0N2tYAhYQFpUc=", "comment": "v7.0.5", "path": "github.com/Azure/go-autorest/autorest/to", - "revision": "0781901f19f1e7db3034d97ec57af753db0bf808", - "revisionTime": "2016-10-03T18:39:13Z", - "version": "v7.2.1", - "versionExact": "v7.2.1" + "revision": 
"a2fdd780c9a50455cecd249b00bdc3eb73a78e31", + "revisionTime": "2017-04-06T20:28:05Z", + "version": "v7.3.1", + "versionExact": "v7.3.1" }, { "checksumSHA1": "oBixceM+55gdk47iff8DSEIh3po=", "path": "github.com/Azure/go-autorest/autorest/validation", - "revision": "0781901f19f1e7db3034d97ec57af753db0bf808", - "revisionTime": "2016-10-03T18:39:13Z", - "version": "v7.2.1", - "versionExact": "v7.2.1" + "revision": "a2fdd780c9a50455cecd249b00bdc3eb73a78e31", + "revisionTime": "2017-04-06T20:28:05Z", + "version": "v7.3.1", + "versionExact": "v7.3.1" }, { "checksumSHA1": "ICScouhAqYHoJEpJlJMYg7EzgyY=", From af5e22cf94d4dc92559895ea78fe07c829d196df Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 19 Apr 2017 10:10:07 -0400 Subject: [PATCH 209/342] don't leave WaitForState goroutine running Make sure that we can cancel the WaitForState refresh loop when reaching a timeout, otherwise it may run indefinitely. There's no need to try and store and read the Result concurrently, just pass the value over a channel. --- helper/resource/state.go | 91 +++++++++++++++++++++++++++------------- 1 file changed, 61 insertions(+), 30 deletions(-) diff --git a/helper/resource/state.go b/helper/resource/state.go index 7473a105e..dd5bfd4b6 100644 --- a/helper/resource/state.go +++ b/helper/resource/state.go @@ -2,7 +2,6 @@ package resource import ( "log" - "sync/atomic" "time" ) @@ -62,33 +61,45 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { conf.ContinuousTargetOccurence = 1 } - // We can't safely read the result values if we timeout, so store them in - // an atomic.Value type Result struct { Result interface{} State string Error error + Done bool } - var lastResult atomic.Value - lastResult.Store(Result{}) - doneCh := make(chan struct{}) + // read ever result from the refresh loop, waiting for a positive result.Done + resCh := make(chan Result, 1) + // cancellation channel for the refresh loop + cancelCh := make(chan struct{}) + go func() { - defer close(doneCh) + defer close(resCh) - // Wait for the delay time.Sleep(conf.Delay) - wait := 100 * time.Millisecond + // start with 0 delay for the first loop + var wait time.Duration for { + // wait and watch for cancellation + select { + case <-cancelCh: + return + case <-time.After(wait): + // first round had no wait + if wait == 0 { + wait = 100 * time.Millisecond + } + } + res, currentState, err := conf.Refresh() result := Result{ Result: res, State: currentState, Error: err, } - lastResult.Store(result) + resCh <- result if err != nil { return @@ -98,6 +109,8 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { if res == nil && len(conf.Target) == 0 { targetOccurence += 1 if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result return } else { continue @@ -113,7 +126,7 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { LastError: err, Retries: notfoundTick, } - lastResult.Store(result) + resCh <- result return } } else { @@ -126,6 +139,8 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { found = true targetOccurence += 1 if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result return } else { continue @@ -147,7 +162,7 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { State: result.State, ExpectedState: conf.Target, } - lastResult.Store(result) + resCh <- result return } } @@ -162,30 +177,46 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { } else if wait > 10*time.Second { 
wait = 10 * time.Second } + + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } } log.Printf("[TRACE] Waiting %s before next try", wait) - time.Sleep(wait) - - // Wait between refreshes using exponential backoff, except when - // waiting for the target state to reoccur. - if targetOccurence == 0 { - wait *= 2 - } } }() - select { - case <-doneCh: - r := lastResult.Load().(Result) - return r.Result, r.Error - case <-time.After(conf.Timeout): - r := lastResult.Load().(Result) - return nil, &TimeoutError{ - LastError: r.Error, - LastState: r.State, - Timeout: conf.Timeout, - ExpectedState: conf.Target, + // store the last value result from the refresh loop + lastResult := Result{} + + timeout := time.After(conf.Timeout) + for { + select { + case r, ok := <-resCh: + // channel closed, so return the last result + if !ok { + return lastResult.Result, lastResult.Error + } + + // we reached the intended state + if r.Done { + return r.Result, r.Error + } + + // still waiting, store the last result + lastResult = r + + case <-timeout: + close(cancelCh) + return nil, &TimeoutError{ + LastError: lastResult.Error, + LastState: lastResult.State, + Timeout: conf.Timeout, + ExpectedState: conf.Target, + } } } } From 6601b9b8dd2f7d2fb8474eaadc46a5850f489189 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 19 Apr 2017 10:10:54 -0400 Subject: [PATCH 210/342] adjust the inconsistent_negative test to match This test unfortunately relies on the timing of the loops in WaitForState, and the text of the error message. Adjust the timing so the timeout isn't an even multiple of the poll interval, and make sure we reach a minimum number of retries. --- helper/resource/state_test.go | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/helper/resource/state_test.go b/helper/resource/state_test.go index 4b4731351..5e0cbe2dc 100644 --- a/helper/resource/state_test.go +++ b/helper/resource/state_test.go @@ -2,6 +2,8 @@ package resource import ( "errors" + "strings" + "sync/atomic" "testing" "time" ) @@ -109,11 +111,18 @@ func TestWaitForState_inconsistent_positive(t *testing.T) { } func TestWaitForState_inconsistent_negative(t *testing.T) { + refreshCount := int64(0) + f := InconsistentStateRefreshFunc() + refresh := func() (interface{}, string, error) { + atomic.AddInt64(&refreshCount, 1) + return f() + } + conf := &StateChangeConf{ Pending: []string{"replicating"}, Target: []string{"done"}, - Refresh: InconsistentStateRefreshFunc(), - Timeout: 90 * time.Millisecond, + Refresh: refresh, + Timeout: 85 * time.Millisecond, PollInterval: 10 * time.Millisecond, ContinuousTargetOccurence: 4, } @@ -123,9 +132,17 @@ func TestWaitForState_inconsistent_negative(t *testing.T) { if err == nil { t.Fatal("Expected timeout error. No error returned.") } - expectedErr := "timeout while waiting for state to become 'done' (last state: 'done', timeout: 90ms)" - if err.Error() != expectedErr { - t.Fatalf("Errors don't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error()) + + // we can't guarantee the exact number of refresh calls in the tests by + // timing them, but we want to make sure the test at least went through the + // required states.
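Patches 209 and 210 restructure WaitForState around a result channel plus a cancellation channel, so the refresh goroutine can be stopped on timeout instead of running indefinitely. A compressed, self-contained sketch of that shape (the names, the 10ms poll interval, and the refresh function are illustrative, not Terraform's):

package main

import (
    "errors"
    "fmt"
    "time"
)

type result struct {
    state string
    done  bool
}

// poll runs refresh in a goroutine, streams every result over a channel,
// and closes cancelCh on timeout so the goroutine cannot leak.
func poll(refresh func() (string, bool), timeout time.Duration) (string, error) {
    resCh := make(chan result, 1)
    cancelCh := make(chan struct{})

    go func() {
        defer close(resCh)
        for {
            select {
            case <-cancelCh:
                return
            case <-time.After(10 * time.Millisecond):
            }
            state, done := refresh()
            // send without blocking forever if the caller already gave up
            select {
            case resCh <- result{state, done}:
            case <-cancelCh:
                return
            }
            if done {
                return
            }
        }
    }()

    var last result
    deadline := time.After(timeout)
    for {
        select {
        case r, ok := <-resCh:
            if !ok {
                return last.state, nil
            }
            if r.done {
                return r.state, nil
            }
            last = r
        case <-deadline:
            close(cancelCh)
            return last.state, errors.New("timeout waiting for target state")
        }
    }
}

func main() {
    calls := 0
    state, err := poll(func() (string, bool) {
        calls++
        if calls >= 3 {
            return "done", true // hypothetical refresh: ready on the third call
        }
        return "replicating", false
    }, time.Second)
    fmt.Println(state, err)
}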
+ if atomic.LoadInt64(&refreshCount) < 6 { + t.Fatal("refreshed called too few times") + } + + expectedErr := "timeout while waiting for state to become 'done'" + if !strings.HasPrefix(err.Error(), expectedErr) { + t.Fatalf("error prefix doesn't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error()) } } From 3bf354b16bab2b9f03e15ec066cf4a88360da7f2 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Wed, 19 Apr 2017 10:59:52 -0400 Subject: [PATCH 211/342] bump go-oracle-compute vendor --- .../go-oracle-terraform/compute/storage_volumes.go | 4 ++-- vendor/vendor.json | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go index 5ca9b70c7..da2ce8e0d 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volumes.go @@ -6,8 +6,8 @@ import ( "strings" ) -const WaitForVolumeReadyTimeout = 30 -const WaitForVolumeDeleteTimeout = 30 +const WaitForVolumeReadyTimeout = 600 +const WaitForVolumeDeleteTimeout = 600 // StorageVolumeClient is a client for the Storage Volume functions of the Compute API. type StorageVolumeClient struct { diff --git a/vendor/vendor.json b/vendor/vendor.json index 62c749fac..23471f32d 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1970,22 +1970,22 @@ "revision": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" }, { - "checksumSHA1": "QmBTE/boTriUYw0meIg7xOXo6ns=", + "checksumSHA1": "yNCeppZjpZqEUgp/4g0AeJ+ee44=", "path": "github.com/hashicorp/go-oracle-terraform/compute", - "revision": "5508daed82ecd55b71d45e8a149e99d24825e5bb", - "revisionTime": "2017-04-06T17:51:51Z" + "revision": "bb3c65caa646927d1ad68b97f77241f48c855ad0", + "revisionTime": "2017-04-19T14:06:08Z" }, { "checksumSHA1": "DzK7lYwHt5Isq5Zf73cnQqBO2LI=", "path": "github.com/hashicorp/go-oracle-terraform/helper", - "revision": "5508daed82ecd55b71d45e8a149e99d24825e5bb", - "revisionTime": "2017-04-06T17:51:51Z" + "revision": "bb3c65caa646927d1ad68b97f77241f48c855ad0", + "revisionTime": "2017-04-19T14:06:08Z" }, { "checksumSHA1": "AyNRs19Es9pDw2VMxVKWuLx3Afg=", "path": "github.com/hashicorp/go-oracle-terraform/opc", - "revision": "5508daed82ecd55b71d45e8a149e99d24825e5bb", - "revisionTime": "2017-04-06T17:51:51Z" + "revision": "bb3c65caa646927d1ad68b97f77241f48c855ad0", + "revisionTime": "2017-04-19T14:06:08Z" }, { "checksumSHA1": "b0nQutPMJHeUmz4SjpreotAo6Yk=", From 6bc3610c0925e4edea14d107be71e42b60be2ea7 Mon Sep 17 00:00:00 2001 From: = Date: Wed, 19 Apr 2017 09:18:30 -0600 Subject: [PATCH 212/342] removing extraneous code --- helper/resource/testing.go | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/helper/resource/testing.go b/helper/resource/testing.go index 23af81096..5f08c2a3f 100644 --- a/helper/resource/testing.go +++ b/helper/resource/testing.go @@ -354,21 +354,15 @@ func Test(t TestT, c TestCase) { // Any errors are stored so that they can be returned by the factory in // terraform to match non-test behavior. 
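The testProviderFactories change that follows keeps provider factories lazy, invoking them only at the point of use instead of calling each one eagerly and caching the result. A rough sketch of the factory pattern involved (provider and factory are stand-ins for Terraform's ResourceProvider and ResourceProviderFactory, not the real interfaces):

package main

import "fmt"

// provider and factory are stand-ins: a factory builds (or fails to
// build) a provider only when it is finally called.
type provider interface{ Name() string }

type factory func() (provider, error)

type fixedProvider struct{ name string }

func (p fixedProvider) Name() string { return p.name }

// factoryFixed mirrors terraform.ResourceProviderFactoryFixed: it wraps an
// already-constructed provider so fixed providers and true factories can
// live in the same map, which is what the change below relies on.
func factoryFixed(p provider) factory {
    return func() (provider, error) { return p, nil }
}

func main() {
    factories := map[string]factory{
        "aws": factoryFixed(fixedProvider{name: "aws"}),
    }
    p, err := factories["aws"]() // invoked lazily, at the point of use
    fmt.Println(p.Name(), err)
}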
func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFactory, error) { - ctxProviders := make(map[string]terraform.ResourceProviderFactory) - + ctxProviders := c.ProviderFactories // make(map[string]terraform.ResourceProviderFactory) + if ctxProviders == nil { + ctxProviders = make(map[string]terraform.ResourceProviderFactory) + } // add any fixed providers for k, p := range c.Providers { ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) } - // call any factory functions and store the result. - for k, pf := range c.ProviderFactories { - p, err := pf() - ctxProviders[k] = func() (terraform.ResourceProvider, error) { - return p, err - } - } - // reset the providers if needed for k, pf := range ctxProviders { // we can ignore any errors here, if we don't have a provider to reset From 86d7c47c0ab6239c261e368b184e6d2c86c60252 Mon Sep 17 00:00:00 2001 From: tmshn Date: Wed, 19 Apr 2017 20:26:49 +0900 Subject: [PATCH 213/342] Change cidrhost() to get IP from end of the range when negative number given Ref: https://github.com/apparentlymart/go-cidr/pull/2 --- config/interpolate_funcs_test.go | 15 +++++++++++++++ .../apparentlymart/go-cidr/cidr/cidr.go | 9 ++++++++- vendor/vendor.json | 5 +++-- .../docs/configuration/interpolation.html.md | 6 ++++-- 4 files changed, 30 insertions(+), 5 deletions(-) diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index 78816b6dd..801be6dbb 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -541,11 +541,26 @@ func TestInterpolateFuncCidrHost(t *testing.T) { "192.168.1.5", false, }, + { + `${cidrhost("192.168.1.0/24", -5)}`, + "192.168.1.251", + false, + }, + { + `${cidrhost("192.168.1.0/24", -256)}`, + "192.168.1.0", + false, + }, { `${cidrhost("192.168.1.0/30", 255)}`, nil, true, // 255 doesn't fit in two bits }, + { + `${cidrhost("192.168.1.0/30", -255)}`, + nil, + true, // 255 doesn't fit in two bits + }, { `${cidrhost("not-a-cidr", 6)}`, nil, diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go index 1583d6382..a31cdec77 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -61,7 +61,14 @@ func Host(base *net.IPNet, num int) (net.IP, error) { hostLen := addrLen - parentLen maxHostNum := uint64(1<<uint64(hostLen)) - 1 - if uint64(num) > maxHostNum { + + numUint64 := uint64(num) + if num < 0 { + numUint64 = uint64(-num) - 1 + num = int(maxHostNum - numUint64) + } + + if numUint64 > maxHostNum { return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) } diff --git a/vendor/vendor.json b/vendor/vendor.json index b4fd8b29d..e6ff9097c 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -446,9 +446,10 @@ "revisionTime": "2016-08-22T23:00:20Z" }, { - "checksumSHA1": "kn+zdUr5TNsoAX8BgjOaWYtMT5U=", + "checksumSHA1": "FIL83loX9V9APvGQIjJpbxq53F0=", "path": "github.com/apparentlymart/go-cidr/cidr", - "revision": "a3ebdb999b831ecb6ab8a226e31b07b2b9061c47" + "revision": "7e4b007599d4e2076d9a81be723b3912852dda2c", + "revisionTime": "2017-04-18T07:21:50Z" }, { "checksumSHA1": "yicZ9OtLcy3iCgraWO015yeoO5E=", diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index 5958aef70..1b4b69c5c 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -157,8 +157,10 @@ The supported
built-in functions are: * `chomp(string)` - Removes trailing newlines from the given string. * `cidrhost(iprange, hostnum)` - Takes an IP address range in CIDR notation - and creates an IP address with the given host number. For example, - `cidrhost("10.0.0.0/8", 2)` returns `10.0.0.2`. + and creates an IP address with the given host number. If given host + number is negative, the count starts from the end of the range. + For example, `cidrhost("10.0.0.0/8", 2)` returns `10.0.0.2` and + `cidrhost("10.0.0.0/8", -2)` returns `10.255.255.254`. * `cidrnetmask(iprange)` - Takes an IP address range in CIDR notation and returns the address-formatted subnet mask format that some From cfa31b57a384431c77288e5e5b52d3d1085fd01b Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 19 Apr 2017 09:42:42 -0700 Subject: [PATCH 214/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a76fa1417..e2a2ed51d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ FEATURES: IMPROVEMENTS: + * config: The interpolation function `cidrhost` now accepts a negative host number to count backwards from the end of the range [GH-13765] * state/remote/swift: Support Openstack request logging [GH-13583] * provider/aws: Add an option to skip getting the supported EC2 platforms [GH-13672] * provider/aws: Add `name_prefix` support to `aws_cloudwatch_log_group` [GH-13273] From 03b82191bf11aaffaacc3abb7b268752e29eeab3 Mon Sep 17 00:00:00 2001 From: Yamamoto Date: Thu, 20 Apr 2017 02:20:02 +0900 Subject: [PATCH 215/342] `roles` deprecated in examples (#13769) --- examples/aws-ecs-alb/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/aws-ecs-alb/main.tf b/examples/aws-ecs-alb/main.tf index f0dd7d33b..aaa5bfad9 100644 --- a/examples/aws-ecs-alb/main.tf +++ b/examples/aws-ecs-alb/main.tf @@ -249,7 +249,7 @@ EOF resource "aws_iam_instance_profile" "app" { name = "tf-ecs-instprofile" - roles = ["${aws_iam_role.app_instance.name}"] + role = "${aws_iam_role.app_instance.name}" } resource "aws_iam_role" "app_instance" { From 596f05b881569d5a89b3fa6fe57732cabacafda5 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Wed, 19 Apr 2017 10:21:31 -0700 Subject: [PATCH 216/342] Fix govet errors (#13774) --- .../resource_digitalocean_droplet_test.go | 4 +-- .../fastly/resource_fastly_service_v1.go | 32 +++++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go index 90bece0e3..be3bd10b7 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go @@ -334,11 +334,11 @@ func testAccCheckDigitalOceanDropletAttributes(droplet *godo.Droplet) resource.T } if droplet.Size.PriceHourly != 0.00744 { - return fmt.Errorf("Bad price_hourly: %s", droplet.Size.PriceHourly) + return fmt.Errorf("Bad price_hourly: %v", droplet.Size.PriceHourly) } if droplet.Size.PriceMonthly != 5.0 { - return fmt.Errorf("Bad price_monthly: %s", droplet.Size.PriceMonthly) + return fmt.Errorf("Bad price_monthly: %v", droplet.Size.PriceMonthly) } if droplet.Region.Slug != "nyc3" { diff --git a/builtin/providers/fastly/resource_fastly_service_v1.go b/builtin/providers/fastly/resource_fastly_service_v1.go index db9f6df2d..e9408902f 100644 --- a/builtin/providers/fastly/resource_fastly_service_v1.go +++ 
b/builtin/providers/fastly/resource_fastly_service_v1.go @@ -1664,7 +1664,7 @@ func resourceServiceV1Update(d *schema.ResourceData, meta interface{}) error { } // validate version - log.Printf("[DEBUG] Validating Fastly Service (%s), Version (%s)", d.Id(), latestVersion) + log.Printf("[DEBUG] Validating Fastly Service (%s), Version (%v)", d.Id(), latestVersion) valid, msg, err := conn.ValidateVersion(&gofastly.ValidateVersionInput{ Service: d.Id(), Version: latestVersion, @@ -1678,7 +1678,7 @@ func resourceServiceV1Update(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("[ERR] Invalid configuration for Fastly Service (%s): %s", d.Id(), msg) } - log.Printf("[DEBUG] Activating Fastly Service (%s), Version (%s)", d.Id(), latestVersion) + log.Printf("[DEBUG] Activating Fastly Service (%s), Version (%v)", d.Id(), latestVersion) _, err = conn.ActivateVersion(&gofastly.ActivateVersionInput{ Service: d.Id(), Version: latestVersion, @@ -1735,7 +1735,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { d.Set("default_host", settings.DefaultHost) d.Set("default_ttl", settings.DefaultTTL) } else { - return fmt.Errorf("[ERR] Error looking up Version settings for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Version settings for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } // TODO: update go-fastly to support an ActiveVersion struct, which contains @@ -1748,7 +1748,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Domains for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Domains for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } // Refresh Domains @@ -1766,7 +1766,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Backends for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Backends for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } bl := flattenBackends(backendList) @@ -1783,7 +1783,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Headers for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Headers for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } hl := flattenHeaders(headerList) @@ -1800,7 +1800,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Gzips for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Gzips for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } gl := flattenGzips(gzipsList) @@ -1817,7 +1817,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Healthcheck for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Healthcheck for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } hcl := flattenHealthchecks(healthcheckList) @@ -1834,7 +1834,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - 
return fmt.Errorf("[ERR] Error looking up S3 Logging for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up S3 Logging for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } sl := flattenS3s(s3List) @@ -1851,7 +1851,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Papertrail for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Papertrail for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } pl := flattenPapertrails(papertrailList) @@ -1868,7 +1868,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Sumologic for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Sumologic for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } sul := flattenSumologics(sumologicList) @@ -1884,7 +1884,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Response Object for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Response Object for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } rol := flattenResponseObjects(responseObjectList) @@ -1901,7 +1901,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Conditions for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Conditions for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } cl := flattenConditions(conditionList) @@ -1918,7 +1918,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Request Settings for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Request Settings for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } rl := flattenRequestSettings(rsList) @@ -1934,7 +1934,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { Version: s.ActiveVersion.Number, }) if err != nil { - return fmt.Errorf("[ERR] Error looking up VCLs for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up VCLs for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } vl := flattenVCLs(vclList) @@ -1950,7 +1950,7 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { Version: s.ActiveVersion.Number, }) if err != nil { - return fmt.Errorf("[ERR] Error looking up Cache Settings for (%s), version (%d): %s", d.Id(), s.ActiveVersion.Number, err) + return fmt.Errorf("[ERR] Error looking up Cache Settings for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) } csl := flattenCacheSettings(cslList) From 3c6287cf3c8b1ef87acc5b3af0e52772cfc2a7cf Mon Sep 17 00:00:00 2001 From: Gauthier Wallet Date: Wed, 19 Apr 2017 13:22:36 -0400 Subject: [PATCH 217/342] Vendored AWS CognitoIdentity Service (#13771) --- .../aws-sdk-go/service/cognitoidentity/api.go | 4113 +++++++++++++++++ .../service/cognitoidentity/customizations.go | 12 + .../service/cognitoidentity/errors.go | 77 + 
.../service/cognitoidentity/service.go | 124 + vendor/vendor.json | 8 + 5 files changed, 4334 insertions(+) create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go new file mode 100644 index 000000000..cd2465144 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go @@ -0,0 +1,4113 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package cognitoidentity provides a client for Amazon Cognito Identity. +package cognitoidentity + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCreateIdentityPool = "CreateIdentityPool" + +// CreateIdentityPoolRequest generates a "aws/request.Request" representing the +// client's request for the CreateIdentityPool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateIdentityPool for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateIdentityPool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateIdentityPoolRequest method. +// req, resp := client.CreateIdentityPoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/CreateIdentityPool +func (c *CognitoIdentity) CreateIdentityPoolRequest(input *CreateIdentityPoolInput) (req *request.Request, output *IdentityPool) { + op := &request.Operation{ + Name: opCreateIdentityPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateIdentityPoolInput{} + } + + output = &IdentityPool{} + req = c.newRequest(op, input, output) + return +} + +// CreateIdentityPool API operation for Amazon Cognito Identity. +// +// Creates a new identity pool. The identity pool is a store of user identity +// information that is specific to your AWS account. The limit on identity pools +// is 60 per account. The keys for SupportedLoginProviders are as follows: +// +// * Facebook: graph.facebook.com +// +// * Google: accounts.google.com +// +// * Amazon: www.amazon.com +// +// * Twitter: api.twitter.com +// +// * Digits: www.digits.com +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
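The generated documentation above spells out the SDK's two-step request pattern: build the request object, then execute it with Send. A hedged usage sketch against this vendored client (the region, pool name, and configuration below are placeholders):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
    client := cognitoidentity.New(sess)

    // Build the request without sending it, as the generated docs
    // describe, then execute it with Send.
    req, pool := client.CreateIdentityPoolRequest(&cognitoidentity.CreateIdentityPoolInput{
        IdentityPoolName:               aws.String("example"),
        AllowUnauthenticatedIdentities: aws.Bool(false),
    })
    if err := req.Send(); err != nil {
        fmt.Println("create failed:", err)
        return
    }
    fmt.Println("created pool:", aws.StringValue(pool.IdentityPoolId))
}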
+// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation CreateIdentityPool for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// Thrown when a user tries to use a login which is already linked to another +// account. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Thrown when the total number of user pools has exceeded a preset limit. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/CreateIdentityPool +func (c *CognitoIdentity) CreateIdentityPool(input *CreateIdentityPoolInput) (*IdentityPool, error) { + req, out := c.CreateIdentityPoolRequest(input) + return out, req.Send() +} + +// CreateIdentityPoolWithContext is the same as CreateIdentityPool with the addition of +// the ability to pass a context and additional request options. +// +// See CreateIdentityPool for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) CreateIdentityPoolWithContext(ctx aws.Context, input *CreateIdentityPoolInput, opts ...request.Option) (*IdentityPool, error) { + req, out := c.CreateIdentityPoolRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteIdentities = "DeleteIdentities" + +// DeleteIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIdentities operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteIdentities for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIdentities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteIdentitiesRequest method. 
+// req, resp := client.DeleteIdentitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DeleteIdentities +func (c *CognitoIdentity) DeleteIdentitiesRequest(input *DeleteIdentitiesInput) (req *request.Request, output *DeleteIdentitiesOutput) { + op := &request.Operation{ + Name: opDeleteIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentitiesInput{} + } + + output = &DeleteIdentitiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteIdentities API operation for Amazon Cognito Identity. +// +// Deletes identities from an identity pool. You can specify a list of 1-60 +// identities that you want to delete. +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation DeleteIdentities for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DeleteIdentities +func (c *CognitoIdentity) DeleteIdentities(input *DeleteIdentitiesInput) (*DeleteIdentitiesOutput, error) { + req, out := c.DeleteIdentitiesRequest(input) + return out, req.Send() +} + +// DeleteIdentitiesWithContext is the same as DeleteIdentities with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteIdentities for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) DeleteIdentitiesWithContext(ctx aws.Context, input *DeleteIdentitiesInput, opts ...request.Option) (*DeleteIdentitiesOutput, error) { + req, out := c.DeleteIdentitiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteIdentityPool = "DeleteIdentityPool" + +// DeleteIdentityPoolRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIdentityPool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteIdentityPool for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIdentityPool method directly +// instead. 
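+//
+// For instance, a hedged sketch of inspecting the request before it is sent
+// (svc is assumed to be a configured *CognitoIdentity client, and the pool
+// ID is a placeholder):
+//
+//    req, _ := svc.DeleteIdentityPoolRequest(&cognitoidentity.DeleteIdentityPoolInput{
+//        IdentityPoolId: aws.String("us-east-1:00000000-0000-0000-0000-000000000000"),
+//    })
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        // inspect or log r.HTTPRequest before it goes out
+//    })
+//    err := req.Send()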
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteIdentityPoolRequest method. +// req, resp := client.DeleteIdentityPoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DeleteIdentityPool +func (c *CognitoIdentity) DeleteIdentityPoolRequest(input *DeleteIdentityPoolInput) (req *request.Request, output *DeleteIdentityPoolOutput) { + op := &request.Operation{ + Name: opDeleteIdentityPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentityPoolInput{} + } + + output = &DeleteIdentityPoolOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteIdentityPool API operation for Amazon Cognito Identity. +// +// Deletes a user pool. Once a pool is deleted, users will not be able to authenticate +// with the pool. +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation DeleteIdentityPool for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DeleteIdentityPool +func (c *CognitoIdentity) DeleteIdentityPool(input *DeleteIdentityPoolInput) (*DeleteIdentityPoolOutput, error) { + req, out := c.DeleteIdentityPoolRequest(input) + return out, req.Send() +} + +// DeleteIdentityPoolWithContext is the same as DeleteIdentityPool with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteIdentityPool for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) DeleteIdentityPoolWithContext(ctx aws.Context, input *DeleteIdentityPoolInput, opts ...request.Option) (*DeleteIdentityPoolOutput, error) { + req, out := c.DeleteIdentityPoolRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeIdentity = "DescribeIdentity" + +// DescribeIdentityRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeIdentityRequest method. +// req, resp := client.DescribeIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DescribeIdentity +func (c *CognitoIdentity) DescribeIdentityRequest(input *DescribeIdentityInput) (req *request.Request, output *IdentityDescription) { + op := &request.Operation{ + Name: opDescribeIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIdentityInput{} + } + + output = &IdentityDescription{} + req = c.newRequest(op, input, output) + return +} + +// DescribeIdentity API operation for Amazon Cognito Identity. +// +// Returns metadata related to the given identity, including when the identity +// was created and any associated linked logins. +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation DescribeIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DescribeIdentity +func (c *CognitoIdentity) DescribeIdentity(input *DescribeIdentityInput) (*IdentityDescription, error) { + req, out := c.DescribeIdentityRequest(input) + return out, req.Send() +} + +// DescribeIdentityWithContext is the same as DescribeIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) DescribeIdentityWithContext(ctx aws.Context, input *DescribeIdentityInput, opts ...request.Option) (*IdentityDescription, error) { + req, out := c.DescribeIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeIdentityPool = "DescribeIdentityPool" + +// DescribeIdentityPoolRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIdentityPool operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeIdentityPool for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeIdentityPool method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeIdentityPoolRequest method. +// req, resp := client.DescribeIdentityPoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DescribeIdentityPool +func (c *CognitoIdentity) DescribeIdentityPoolRequest(input *DescribeIdentityPoolInput) (req *request.Request, output *IdentityPool) { + op := &request.Operation{ + Name: opDescribeIdentityPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIdentityPoolInput{} + } + + output = &IdentityPool{} + req = c.newRequest(op, input, output) + return +} + +// DescribeIdentityPool API operation for Amazon Cognito Identity. +// +// Gets details about a particular identity pool, including the pool name, ID +// description, creation date, and current number of users. +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation DescribeIdentityPool for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. 
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DescribeIdentityPool +func (c *CognitoIdentity) DescribeIdentityPool(input *DescribeIdentityPoolInput) (*IdentityPool, error) { + req, out := c.DescribeIdentityPoolRequest(input) + return out, req.Send() +} + +// DescribeIdentityPoolWithContext is the same as DescribeIdentityPool with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIdentityPool for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) DescribeIdentityPoolWithContext(ctx aws.Context, input *DescribeIdentityPoolInput, opts ...request.Option) (*IdentityPool, error) { + req, out := c.DescribeIdentityPoolRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCredentialsForIdentity = "GetCredentialsForIdentity" + +// GetCredentialsForIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCredentialsForIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetCredentialsForIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCredentialsForIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCredentialsForIdentityRequest method. +// req, resp := client.GetCredentialsForIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetCredentialsForIdentity +func (c *CognitoIdentity) GetCredentialsForIdentityRequest(input *GetCredentialsForIdentityInput) (req *request.Request, output *GetCredentialsForIdentityOutput) { + op := &request.Operation{ + Name: opGetCredentialsForIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCredentialsForIdentityInput{} + } + + output = &GetCredentialsForIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCredentialsForIdentity API operation for Amazon Cognito Identity. +// +// Returns credentials for the provided identity ID. Any provided logins will +// be validated against supported login providers. If the token is for cognito-identity.amazonaws.com, +// it will be passed through to AWS Security Token Service with the appropriate +// role for the token. +// +// This is a public API. You do not need any credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
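+//
+// A minimal sketch (the identity ID and login token below are placeholders,
+// and svc is assumed to be a configured *CognitoIdentity client):
+//
+//    out, err := svc.GetCredentialsForIdentity(&cognitoidentity.GetCredentialsForIdentityInput{
+//        IdentityId: aws.String("us-east-1:00000000-0000-0000-0000-000000000000"),
+//        Logins: map[string]*string{
+//            "graph.facebook.com": aws.String("<provider token>"),
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
+//    }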
+// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation GetCredentialsForIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// Thrown when a user tries to use a login which is already linked to another +// account. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInvalidIdentityPoolConfigurationException "InvalidIdentityPoolConfigurationException" +// Thrown if the identity pool has no role associated for the given auth type +// (auth/unauth) or if the AssumeRole fails. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// * ErrCodeExternalServiceException "ExternalServiceException" +// An exception thrown when a dependent service such as Facebook or Twitter +// is not responding +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetCredentialsForIdentity +func (c *CognitoIdentity) GetCredentialsForIdentity(input *GetCredentialsForIdentityInput) (*GetCredentialsForIdentityOutput, error) { + req, out := c.GetCredentialsForIdentityRequest(input) + return out, req.Send() +} + +// GetCredentialsForIdentityWithContext is the same as GetCredentialsForIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetCredentialsForIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) GetCredentialsForIdentityWithContext(ctx aws.Context, input *GetCredentialsForIdentityInput, opts ...request.Option) (*GetCredentialsForIdentityOutput, error) { + req, out := c.GetCredentialsForIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetId = "GetId" + +// GetIdRequest generates a "aws/request.Request" representing the +// client's request for the GetId operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetId for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetId method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdRequest method. 
+// req, resp := client.GetIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetId +func (c *CognitoIdentity) GetIdRequest(input *GetIdInput) (req *request.Request, output *GetIdOutput) { + op := &request.Operation{ + Name: opGetId, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdInput{} + } + + output = &GetIdOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetId API operation for Amazon Cognito Identity. +// +// Generates (or retrieves) a Cognito ID. Supplying multiple logins will create +// an implicit linked account. +// +// This is a public API. You do not need any credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation GetId for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// Thrown when a user tries to use a login which is already linked to another +// account. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// Thrown when the total number of user pools has exceeded a preset limit. +// +// * ErrCodeExternalServiceException "ExternalServiceException" +// An exception thrown when a dependent service such as Facebook or Twitter +// is not responding +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetId +func (c *CognitoIdentity) GetId(input *GetIdInput) (*GetIdOutput, error) { + req, out := c.GetIdRequest(input) + return out, req.Send() +} + +// GetIdWithContext is the same as GetId with the addition of +// the ability to pass a context and additional request options. +// +// See GetId for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) GetIdWithContext(ctx aws.Context, input *GetIdInput, opts ...request.Option) (*GetIdOutput, error) { + req, out := c.GetIdRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetIdentityPoolRoles = "GetIdentityPoolRoles" + +// GetIdentityPoolRolesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityPoolRoles operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetIdentityPoolRoles for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityPoolRoles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdentityPoolRolesRequest method. +// req, resp := client.GetIdentityPoolRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetIdentityPoolRoles +func (c *CognitoIdentity) GetIdentityPoolRolesRequest(input *GetIdentityPoolRolesInput) (req *request.Request, output *GetIdentityPoolRolesOutput) { + op := &request.Operation{ + Name: opGetIdentityPoolRoles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityPoolRolesInput{} + } + + output = &GetIdentityPoolRolesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetIdentityPoolRoles API operation for Amazon Cognito Identity. +// +// Gets the roles for an identity pool. +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation GetIdentityPoolRoles for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// Thrown when a user tries to use a login which is already linked to another +// account. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetIdentityPoolRoles +func (c *CognitoIdentity) GetIdentityPoolRoles(input *GetIdentityPoolRolesInput) (*GetIdentityPoolRolesOutput, error) { + req, out := c.GetIdentityPoolRolesRequest(input) + return out, req.Send() +} + +// GetIdentityPoolRolesWithContext is the same as GetIdentityPoolRoles with the addition of +// the ability to pass a context and additional request options. +// +// See GetIdentityPoolRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) GetIdentityPoolRolesWithContext(ctx aws.Context, input *GetIdentityPoolRolesInput, opts ...request.Option) (*GetIdentityPoolRolesOutput, error) { + req, out := c.GetIdentityPoolRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetOpenIdToken = "GetOpenIdToken" + +// GetOpenIdTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetOpenIdToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetOpenIdToken for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetOpenIdToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetOpenIdTokenRequest method. +// req, resp := client.GetOpenIdTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetOpenIdToken +func (c *CognitoIdentity) GetOpenIdTokenRequest(input *GetOpenIdTokenInput) (req *request.Request, output *GetOpenIdTokenOutput) { + op := &request.Operation{ + Name: opGetOpenIdToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpenIdTokenInput{} + } + + output = &GetOpenIdTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetOpenIdToken API operation for Amazon Cognito Identity. +// +// Gets an OpenID token, using a known Cognito ID. This known Cognito ID is +// returned by GetId. You can optionally add additional logins for the identity. +// Supplying multiple logins creates an implicit link. +// +// The OpenId token is valid for 15 minutes. +// +// This is a public API. You do not need any credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation GetOpenIdToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// Thrown when a user tries to use a login which is already linked to another +// account. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. 
+// +// * ErrCodeExternalServiceException "ExternalServiceException" +// An exception thrown when a dependent service such as Facebook or Twitter +// is not responding +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetOpenIdToken +func (c *CognitoIdentity) GetOpenIdToken(input *GetOpenIdTokenInput) (*GetOpenIdTokenOutput, error) { + req, out := c.GetOpenIdTokenRequest(input) + return out, req.Send() +} + +// GetOpenIdTokenWithContext is the same as GetOpenIdToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetOpenIdToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) GetOpenIdTokenWithContext(ctx aws.Context, input *GetOpenIdTokenInput, opts ...request.Option) (*GetOpenIdTokenOutput, error) { + req, out := c.GetOpenIdTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetOpenIdTokenForDeveloperIdentity = "GetOpenIdTokenForDeveloperIdentity" + +// GetOpenIdTokenForDeveloperIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetOpenIdTokenForDeveloperIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetOpenIdTokenForDeveloperIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetOpenIdTokenForDeveloperIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetOpenIdTokenForDeveloperIdentityRequest method. +// req, resp := client.GetOpenIdTokenForDeveloperIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetOpenIdTokenForDeveloperIdentity +func (c *CognitoIdentity) GetOpenIdTokenForDeveloperIdentityRequest(input *GetOpenIdTokenForDeveloperIdentityInput) (req *request.Request, output *GetOpenIdTokenForDeveloperIdentityOutput) { + op := &request.Operation{ + Name: opGetOpenIdTokenForDeveloperIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpenIdTokenForDeveloperIdentityInput{} + } + + output = &GetOpenIdTokenForDeveloperIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetOpenIdTokenForDeveloperIdentity API operation for Amazon Cognito Identity. +// +// Registers (or retrieves) a Cognito IdentityId and an OpenID Connect token +// for a user authenticated by your backend authentication process. Supplying +// multiple logins will create an implicit linked account. You can only specify +// one developer provider as part of the Logins map, which is linked to the +// identity pool. 
The developer provider is the "domain" by which Cognito will +// refer to your users. +// +// You can use GetOpenIdTokenForDeveloperIdentity to create a new identity and +// to link new logins (that is, user credentials issued by a public provider +// or developer provider) to an existing identity. When you want to create a +// new identity, the IdentityId should be null. When you want to associate a +// new login with an existing authenticated/unauthenticated identity, you can +// do so by providing the existing IdentityId. This API will create the identity +// in the specified IdentityPoolId. +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation GetOpenIdTokenForDeveloperIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// Thrown when a user tries to use a login which is already linked to another +// account. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// * ErrCodeDeveloperUserAlreadyRegisteredException "DeveloperUserAlreadyRegisteredException" +// The provided developer user identifier is already registered with Cognito +// under a different identity ID. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetOpenIdTokenForDeveloperIdentity +func (c *CognitoIdentity) GetOpenIdTokenForDeveloperIdentity(input *GetOpenIdTokenForDeveloperIdentityInput) (*GetOpenIdTokenForDeveloperIdentityOutput, error) { + req, out := c.GetOpenIdTokenForDeveloperIdentityRequest(input) + return out, req.Send() +} + +// GetOpenIdTokenForDeveloperIdentityWithContext is the same as GetOpenIdTokenForDeveloperIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetOpenIdTokenForDeveloperIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) GetOpenIdTokenForDeveloperIdentityWithContext(ctx aws.Context, input *GetOpenIdTokenForDeveloperIdentityInput, opts ...request.Option) (*GetOpenIdTokenForDeveloperIdentityOutput, error) { + req, out := c.GetOpenIdTokenForDeveloperIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opListIdentities = "ListIdentities" + +// ListIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the ListIdentities operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListIdentities for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIdentities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListIdentitiesRequest method. +// req, resp := client.ListIdentitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/ListIdentities +func (c *CognitoIdentity) ListIdentitiesRequest(input *ListIdentitiesInput) (req *request.Request, output *ListIdentitiesOutput) { + op := &request.Operation{ + Name: opListIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIdentitiesInput{} + } + + output = &ListIdentitiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListIdentities API operation for Amazon Cognito Identity. +// +// Lists the identities in a pool. +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation ListIdentities for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/ListIdentities +func (c *CognitoIdentity) ListIdentities(input *ListIdentitiesInput) (*ListIdentitiesOutput, error) { + req, out := c.ListIdentitiesRequest(input) + return out, req.Send() +} + +// ListIdentitiesWithContext is the same as ListIdentities with the addition of +// the ability to pass a context and additional request options. +// +// See ListIdentities for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
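+//
+// As a sketch (placeholder pool ID; assumes the standard context and time
+// packages are imported and svc is a configured client), bounding the call
+// with a timeout might look like:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//    out, err := svc.ListIdentitiesWithContext(ctx, &cognitoidentity.ListIdentitiesInput{
+//        IdentityPoolId: aws.String("us-east-1:00000000-0000-0000-0000-000000000000"),
+//        MaxResults:     aws.Int64(10),
+//    })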
+func (c *CognitoIdentity) ListIdentitiesWithContext(ctx aws.Context, input *ListIdentitiesInput, opts ...request.Option) (*ListIdentitiesOutput, error) { + req, out := c.ListIdentitiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListIdentityPools = "ListIdentityPools" + +// ListIdentityPoolsRequest generates a "aws/request.Request" representing the +// client's request for the ListIdentityPools operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListIdentityPools for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIdentityPools method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListIdentityPoolsRequest method. +// req, resp := client.ListIdentityPoolsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/ListIdentityPools +func (c *CognitoIdentity) ListIdentityPoolsRequest(input *ListIdentityPoolsInput) (req *request.Request, output *ListIdentityPoolsOutput) { + op := &request.Operation{ + Name: opListIdentityPools, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIdentityPoolsInput{} + } + + output = &ListIdentityPoolsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListIdentityPools API operation for Amazon Cognito Identity. +// +// Lists all of the Cognito identity pools registered for your account. +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation ListIdentityPools for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/ListIdentityPools +func (c *CognitoIdentity) ListIdentityPools(input *ListIdentityPoolsInput) (*ListIdentityPoolsOutput, error) { + req, out := c.ListIdentityPoolsRequest(input) + return out, req.Send() +} + +// ListIdentityPoolsWithContext is the same as ListIdentityPools with the addition of +// the ability to pass a context and additional request options. +// +// See ListIdentityPools for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) ListIdentityPoolsWithContext(ctx aws.Context, input *ListIdentityPoolsInput, opts ...request.Option) (*ListIdentityPoolsOutput, error) { + req, out := c.ListIdentityPoolsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opLookupDeveloperIdentity = "LookupDeveloperIdentity" + +// LookupDeveloperIdentityRequest generates a "aws/request.Request" representing the +// client's request for the LookupDeveloperIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See LookupDeveloperIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the LookupDeveloperIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the LookupDeveloperIdentityRequest method. +// req, resp := client.LookupDeveloperIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/LookupDeveloperIdentity +func (c *CognitoIdentity) LookupDeveloperIdentityRequest(input *LookupDeveloperIdentityInput) (req *request.Request, output *LookupDeveloperIdentityOutput) { + op := &request.Operation{ + Name: opLookupDeveloperIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &LookupDeveloperIdentityInput{} + } + + output = &LookupDeveloperIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// LookupDeveloperIdentity API operation for Amazon Cognito Identity. +// +// Retrieves the IdentityID associated with a DeveloperUserIdentifier or the +// list of DeveloperUserIdentifiers associated with an IdentityId for an existing +// identity. Either IdentityID or DeveloperUserIdentifier must not be null. +// If you supply only one of these values, the other value will be searched +// in the database and returned as a part of the response. If you supply both, +// DeveloperUserIdentifier will be matched against IdentityID. If the values +// are verified against the database, the response returns both values and is +// the same as the request. Otherwise a ResourceConflictException is thrown. +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation LookupDeveloperIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). 
+// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// Thrown when a user tries to use a login which is already linked to another +// account. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/LookupDeveloperIdentity +func (c *CognitoIdentity) LookupDeveloperIdentity(input *LookupDeveloperIdentityInput) (*LookupDeveloperIdentityOutput, error) { + req, out := c.LookupDeveloperIdentityRequest(input) + return out, req.Send() +} + +// LookupDeveloperIdentityWithContext is the same as LookupDeveloperIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See LookupDeveloperIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) LookupDeveloperIdentityWithContext(ctx aws.Context, input *LookupDeveloperIdentityInput, opts ...request.Option) (*LookupDeveloperIdentityOutput, error) { + req, out := c.LookupDeveloperIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opMergeDeveloperIdentities = "MergeDeveloperIdentities" + +// MergeDeveloperIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the MergeDeveloperIdentities operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See MergeDeveloperIdentities for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the MergeDeveloperIdentities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the MergeDeveloperIdentitiesRequest method. 
+// req, resp := client.MergeDeveloperIdentitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/MergeDeveloperIdentities +func (c *CognitoIdentity) MergeDeveloperIdentitiesRequest(input *MergeDeveloperIdentitiesInput) (req *request.Request, output *MergeDeveloperIdentitiesOutput) { + op := &request.Operation{ + Name: opMergeDeveloperIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MergeDeveloperIdentitiesInput{} + } + + output = &MergeDeveloperIdentitiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// MergeDeveloperIdentities API operation for Amazon Cognito Identity. +// +// Merges two users having different IdentityIds, existing in the same identity +// pool, and identified by the same developer provider. You can use this action +// to request that discrete users be merged and identified as a single user +// in the Cognito environment. Cognito associates the given source user (SourceUserIdentifier) +// with the IdentityId of the DestinationUserIdentifier. Only developer-authenticated +// users can be merged. If the users to be merged are associated with the same +// public provider, but as two different users, an exception will be thrown. +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation MergeDeveloperIdentities for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// Thrown when a user tries to use a login which is already linked to another +// account. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/MergeDeveloperIdentities +func (c *CognitoIdentity) MergeDeveloperIdentities(input *MergeDeveloperIdentitiesInput) (*MergeDeveloperIdentitiesOutput, error) { + req, out := c.MergeDeveloperIdentitiesRequest(input) + return out, req.Send() +} + +// MergeDeveloperIdentitiesWithContext is the same as MergeDeveloperIdentities with the addition of +// the ability to pass a context and additional request options. +// +// See MergeDeveloperIdentities for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
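+//
+// A hedged sketch (all identifiers below are placeholders; svc is a
+// configured client and the standard context/time packages are assumed):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//    out, err := svc.MergeDeveloperIdentitiesWithContext(ctx, &cognitoidentity.MergeDeveloperIdentitiesInput{
+//        DeveloperProviderName:     aws.String("login.example.com"),
+//        SourceUserIdentifier:      aws.String("user-b"),
+//        DestinationUserIdentifier: aws.String("user-a"),
+//        IdentityPoolId:            aws.String("us-east-1:00000000-0000-0000-0000-000000000000"),
+//    })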
+func (c *CognitoIdentity) MergeDeveloperIdentitiesWithContext(ctx aws.Context, input *MergeDeveloperIdentitiesInput, opts ...request.Option) (*MergeDeveloperIdentitiesOutput, error) { + req, out := c.MergeDeveloperIdentitiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetIdentityPoolRoles = "SetIdentityPoolRoles" + +// SetIdentityPoolRolesRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityPoolRoles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See SetIdentityPoolRoles for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityPoolRoles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityPoolRolesRequest method. +// req, resp := client.SetIdentityPoolRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/SetIdentityPoolRoles +func (c *CognitoIdentity) SetIdentityPoolRolesRequest(input *SetIdentityPoolRolesInput) (req *request.Request, output *SetIdentityPoolRolesOutput) { + op := &request.Operation{ + Name: opSetIdentityPoolRoles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityPoolRolesInput{} + } + + output = &SetIdentityPoolRolesOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetIdentityPoolRoles API operation for Amazon Cognito Identity. +// +// Sets the roles for an identity pool. These roles are used when making calls +// to GetCredentialsForIdentity action. +// +// You must use AWS Developer credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation SetIdentityPoolRoles for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// Thrown when a user tries to use a login which is already linked to another +// account. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. 
+// +// * ErrCodeConcurrentModificationException "ConcurrentModificationException" +// Thrown if there are parallel requests to modify a resource. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/SetIdentityPoolRoles +func (c *CognitoIdentity) SetIdentityPoolRoles(input *SetIdentityPoolRolesInput) (*SetIdentityPoolRolesOutput, error) { + req, out := c.SetIdentityPoolRolesRequest(input) + return out, req.Send() +} + +// SetIdentityPoolRolesWithContext is the same as SetIdentityPoolRoles with the addition of +// the ability to pass a context and additional request options. +// +// See SetIdentityPoolRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) SetIdentityPoolRolesWithContext(ctx aws.Context, input *SetIdentityPoolRolesInput, opts ...request.Option) (*SetIdentityPoolRolesOutput, error) { + req, out := c.SetIdentityPoolRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUnlinkDeveloperIdentity = "UnlinkDeveloperIdentity" + +// UnlinkDeveloperIdentityRequest generates a "aws/request.Request" representing the +// client's request for the UnlinkDeveloperIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UnlinkDeveloperIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnlinkDeveloperIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnlinkDeveloperIdentityRequest method. +// req, resp := client.UnlinkDeveloperIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/UnlinkDeveloperIdentity +func (c *CognitoIdentity) UnlinkDeveloperIdentityRequest(input *UnlinkDeveloperIdentityInput) (req *request.Request, output *UnlinkDeveloperIdentityOutput) { + op := &request.Operation{ + Name: opUnlinkDeveloperIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnlinkDeveloperIdentityInput{} + } + + output = &UnlinkDeveloperIdentityOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// UnlinkDeveloperIdentity API operation for Amazon Cognito Identity. +// +// Unlinks a DeveloperUserIdentifier from an existing identity. Unlinked developer +// users will be considered new identities next time they are seen. If, for +// a given Cognito identity, you remove all federated identities as well as +// the developer user identifier, the Cognito identity becomes inaccessible. +// +// You must use AWS Developer credentials to call this API. 
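+//
+// As a usage sketch (the provider name, user identifier, and IDs below are
+// hypothetical placeholders):
+//
+//    _, err := client.UnlinkDeveloperIdentity(&cognitoidentity.UnlinkDeveloperIdentityInput{
+//        DeveloperProviderName:   aws.String("login.mycompany.example"),
+//        DeveloperUserIdentifier: aws.String("backend-user-1234"),
+//        IdentityId:              aws.String("us-east-1:00000000-0000-0000-0000-000000000000"),
+//        IdentityPoolId:          aws.String("us-east-1:11111111-1111-1111-1111-111111111111"),
+//    })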
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation UnlinkDeveloperIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// Thrown when a user tries to use a login which is already linked to another +// account. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/UnlinkDeveloperIdentity +func (c *CognitoIdentity) UnlinkDeveloperIdentity(input *UnlinkDeveloperIdentityInput) (*UnlinkDeveloperIdentityOutput, error) { + req, out := c.UnlinkDeveloperIdentityRequest(input) + return out, req.Send() +} + +// UnlinkDeveloperIdentityWithContext is the same as UnlinkDeveloperIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See UnlinkDeveloperIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) UnlinkDeveloperIdentityWithContext(ctx aws.Context, input *UnlinkDeveloperIdentityInput, opts ...request.Option) (*UnlinkDeveloperIdentityOutput, error) { + req, out := c.UnlinkDeveloperIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUnlinkIdentity = "UnlinkIdentity" + +// UnlinkIdentityRequest generates a "aws/request.Request" representing the +// client's request for the UnlinkIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UnlinkIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnlinkIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnlinkIdentityRequest method. 
+// req, resp := client.UnlinkIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/UnlinkIdentity +func (c *CognitoIdentity) UnlinkIdentityRequest(input *UnlinkIdentityInput) (req *request.Request, output *UnlinkIdentityOutput) { + op := &request.Operation{ + Name: opUnlinkIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnlinkIdentityInput{} + } + + output = &UnlinkIdentityOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// UnlinkIdentity API operation for Amazon Cognito Identity. +// +// Unlinks a federated identity from an existing account. Unlinked logins will +// be considered new identities next time they are seen. Removing the last linked +// login will make this identity inaccessible. +// +// This is a public API. You do not need any credentials to call this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Cognito Identity's +// API operation UnlinkIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// Thrown for missing or bad input parameter(s). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Thrown when the requested resource (for example, a dataset or record) does +// not exist. +// +// * ErrCodeNotAuthorizedException "NotAuthorizedException" +// Thrown when a user is not authorized to access the requested resource. +// +// * ErrCodeResourceConflictException "ResourceConflictException" +// Thrown when a user tries to use a login which is already linked to another +// account. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// Thrown when a request is throttled. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// Thrown when the service encounters an error during processing the request. +// +// * ErrCodeExternalServiceException "ExternalServiceException" +// An exception thrown when a dependent service such as Facebook or Twitter +// is not responding +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/UnlinkIdentity +func (c *CognitoIdentity) UnlinkIdentity(input *UnlinkIdentityInput) (*UnlinkIdentityOutput, error) { + req, out := c.UnlinkIdentityRequest(input) + return out, req.Send() +} + +// UnlinkIdentityWithContext is the same as UnlinkIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See UnlinkIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CognitoIdentity) UnlinkIdentityWithContext(ctx aws.Context, input *UnlinkIdentityInput, opts ...request.Option) (*UnlinkIdentityOutput, error) { + req, out := c.UnlinkIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send()
+}
+
+const opUpdateIdentityPool = "UpdateIdentityPool"
+
+// UpdateIdentityPoolRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateIdentityPool operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See UpdateIdentityPool for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UpdateIdentityPool method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UpdateIdentityPoolRequest method.
+// req, resp := client.UpdateIdentityPoolRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/UpdateIdentityPool
+func (c *CognitoIdentity) UpdateIdentityPoolRequest(input *IdentityPool) (req *request.Request, output *IdentityPool) {
+ op := &request.Operation{
+ Name: opUpdateIdentityPool,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &IdentityPool{}
+ }
+
+ output = &IdentityPool{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UpdateIdentityPool API operation for Amazon Cognito Identity.
+//
+// Updates an identity pool.
+//
+// You must use AWS Developer credentials to call this API.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Cognito Identity's
+// API operation UpdateIdentityPool for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// Thrown for missing or bad input parameter(s).
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// Thrown when the requested resource (for example, a dataset or record) does
+// not exist.
+//
+// * ErrCodeNotAuthorizedException "NotAuthorizedException"
+// Thrown when a user is not authorized to access the requested resource.
+//
+// * ErrCodeResourceConflictException "ResourceConflictException"
+// Thrown when a user tries to use a login which is already linked to another
+// account.
+//
+// * ErrCodeTooManyRequestsException "TooManyRequestsException"
+// Thrown when a request is throttled.
+//
+// * ErrCodeInternalErrorException "InternalErrorException"
+// Thrown when the service encounters an error during processing the request.
+//
+// * ErrCodeConcurrentModificationException "ConcurrentModificationException"
+// Thrown if there are parallel requests to modify a resource.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// Thrown when the total number of identity pools has exceeded a preset limit.
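+//
+// As a usage sketch, the operation takes a full IdentityPool value, typically
+// one previously returned by DescribeIdentityPool and then modified (an
+// assumed flow; error handling elided):
+//
+//    pool.AllowUnauthenticatedIdentities = aws.Bool(false)
+//    updated, err := client.UpdateIdentityPool(pool)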
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/UpdateIdentityPool
+func (c *CognitoIdentity) UpdateIdentityPool(input *IdentityPool) (*IdentityPool, error) {
+ req, out := c.UpdateIdentityPoolRequest(input)
+ return out, req.Send()
+}
+
+// UpdateIdentityPoolWithContext is the same as UpdateIdentityPool with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateIdentityPool for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *CognitoIdentity) UpdateIdentityPoolWithContext(ctx aws.Context, input *IdentityPool, opts ...request.Option) (*IdentityPool, error) {
+ req, out := c.UpdateIdentityPoolRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// Input to the CreateIdentityPool action.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/CreateIdentityPoolInput
+type CreateIdentityPoolInput struct {
+ _ struct{} `type:"structure"`
+
+ // TRUE if the identity pool supports unauthenticated logins.
+ //
+ // AllowUnauthenticatedIdentities is a required field
+ AllowUnauthenticatedIdentities *bool `type:"boolean" required:"true"`
+
+ // An array of Amazon Cognito Identity user pools and their client IDs.
+ CognitoIdentityProviders []*Provider `type:"list"`
+
+ // The "domain" by which Cognito will refer to your users. This name acts as
+ // a placeholder that allows your backend and the Cognito service to communicate
+ // about the developer provider. For the DeveloperProviderName, you can use
+ // letters as well as period (.), underscore (_), and dash (-).
+ //
+ // Once you have set a developer provider name, you cannot change it. Please
+ // take care in setting this parameter.
+ DeveloperProviderName *string `min:"1" type:"string"`
+
+ // A string that you provide.
+ //
+ // IdentityPoolName is a required field
+ IdentityPoolName *string `min:"1" type:"string" required:"true"`
+
+ // A list of OpenID Connect provider ARNs.
+ OpenIdConnectProviderARNs []*string `type:"list"`
+
+ // An array of Amazon Resource Names (ARNs) of the SAML provider for your identity
+ // pool.
+ SamlProviderARNs []*string `type:"list"`
+
+ // Optional key:value pairs mapping provider names to provider app IDs.
+ SupportedLoginProviders map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s CreateIdentityPoolInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateIdentityPoolInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateIdentityPoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateIdentityPoolInput"} + if s.AllowUnauthenticatedIdentities == nil { + invalidParams.Add(request.NewErrParamRequired("AllowUnauthenticatedIdentities")) + } + if s.DeveloperProviderName != nil && len(*s.DeveloperProviderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeveloperProviderName", 1)) + } + if s.IdentityPoolName == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolName")) + } + if s.IdentityPoolName != nil && len(*s.IdentityPoolName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolName", 1)) + } + if s.CognitoIdentityProviders != nil { + for i, v := range s.CognitoIdentityProviders { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CognitoIdentityProviders", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowUnauthenticatedIdentities sets the AllowUnauthenticatedIdentities field's value. +func (s *CreateIdentityPoolInput) SetAllowUnauthenticatedIdentities(v bool) *CreateIdentityPoolInput { + s.AllowUnauthenticatedIdentities = &v + return s +} + +// SetCognitoIdentityProviders sets the CognitoIdentityProviders field's value. +func (s *CreateIdentityPoolInput) SetCognitoIdentityProviders(v []*Provider) *CreateIdentityPoolInput { + s.CognitoIdentityProviders = v + return s +} + +// SetDeveloperProviderName sets the DeveloperProviderName field's value. +func (s *CreateIdentityPoolInput) SetDeveloperProviderName(v string) *CreateIdentityPoolInput { + s.DeveloperProviderName = &v + return s +} + +// SetIdentityPoolName sets the IdentityPoolName field's value. +func (s *CreateIdentityPoolInput) SetIdentityPoolName(v string) *CreateIdentityPoolInput { + s.IdentityPoolName = &v + return s +} + +// SetOpenIdConnectProviderARNs sets the OpenIdConnectProviderARNs field's value. +func (s *CreateIdentityPoolInput) SetOpenIdConnectProviderARNs(v []*string) *CreateIdentityPoolInput { + s.OpenIdConnectProviderARNs = v + return s +} + +// SetSamlProviderARNs sets the SamlProviderARNs field's value. +func (s *CreateIdentityPoolInput) SetSamlProviderARNs(v []*string) *CreateIdentityPoolInput { + s.SamlProviderARNs = v + return s +} + +// SetSupportedLoginProviders sets the SupportedLoginProviders field's value. +func (s *CreateIdentityPoolInput) SetSupportedLoginProviders(v map[string]*string) *CreateIdentityPoolInput { + s.SupportedLoginProviders = v + return s +} + +// Credentials for the provided identity ID. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/Credentials +type Credentials struct { + _ struct{} `type:"structure"` + + // The Access Key portion of the credentials. + AccessKeyId *string `type:"string"` + + // The date at which these credentials will expire. + Expiration *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Secret Access Key portion of the credentials + SecretKey *string `type:"string"` + + // The Session Token portion of the credentials + SessionToken *string `type:"string"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. 
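+// Like the other setters in this package, it returns the receiver, so calls
+// can be chained; a sketch with placeholder values:
+//
+//    creds := (&cognitoidentity.Credentials{}).
+//        SetAccessKeyId("AKIDEXAMPLE").
+//        SetSecretKey("secret-example").
+//        SetSessionToken("token-example")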
+func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretKey sets the SecretKey field's value. +func (s *Credentials) SetSecretKey(v string) *Credentials { + s.SecretKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +// Input to the DeleteIdentities action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DeleteIdentitiesInput +type DeleteIdentitiesInput struct { + _ struct{} `type:"structure"` + + // A list of 1-60 identities that you want to delete. + // + // IdentityIdsToDelete is a required field + IdentityIdsToDelete []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentitiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIdentitiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIdentitiesInput"} + if s.IdentityIdsToDelete == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityIdsToDelete")) + } + if s.IdentityIdsToDelete != nil && len(s.IdentityIdsToDelete) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityIdsToDelete", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentityIdsToDelete sets the IdentityIdsToDelete field's value. +func (s *DeleteIdentitiesInput) SetIdentityIdsToDelete(v []*string) *DeleteIdentitiesInput { + s.IdentityIdsToDelete = v + return s +} + +// Returned in response to a successful DeleteIdentities operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DeleteIdentitiesResponse +type DeleteIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // An array of UnprocessedIdentityId objects, each of which contains an ErrorCode + // and IdentityId. + UnprocessedIdentityIds []*UnprocessedIdentityId `type:"list"` +} + +// String returns the string representation +func (s DeleteIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentitiesOutput) GoString() string { + return s.String() +} + +// SetUnprocessedIdentityIds sets the UnprocessedIdentityIds field's value. +func (s *DeleteIdentitiesOutput) SetUnprocessedIdentityIds(v []*UnprocessedIdentityId) *DeleteIdentitiesOutput { + s.UnprocessedIdentityIds = v + return s +} + +// Input to the DeleteIdentityPool action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DeleteIdentityPoolInput +type DeleteIdentityPoolInput struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. 
+ // + // IdentityPoolId is a required field + IdentityPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentityPoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIdentityPoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIdentityPoolInput"} + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentityPoolId sets the IdentityPoolId field's value. +func (s *DeleteIdentityPoolInput) SetIdentityPoolId(v string) *DeleteIdentityPoolInput { + s.IdentityPoolId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DeleteIdentityPoolOutput +type DeleteIdentityPoolOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIdentityPoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPoolOutput) GoString() string { + return s.String() +} + +// Input to the DescribeIdentity action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DescribeIdentityInput +type DescribeIdentityInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + // + // IdentityId is a required field + IdentityId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIdentityInput"} + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentityId sets the IdentityId field's value. +func (s *DescribeIdentityInput) SetIdentityId(v string) *DescribeIdentityInput { + s.IdentityId = &v + return s +} + +// Input to the DescribeIdentityPool action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DescribeIdentityPoolInput +type DescribeIdentityPoolInput struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. 
+ // + // IdentityPoolId is a required field + IdentityPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIdentityPoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityPoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIdentityPoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIdentityPoolInput"} + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentityPoolId sets the IdentityPoolId field's value. +func (s *DescribeIdentityPoolInput) SetIdentityPoolId(v string) *DescribeIdentityPoolInput { + s.IdentityPoolId = &v + return s +} + +// Input to the GetCredentialsForIdentity action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetCredentialsForIdentityInput +type GetCredentialsForIdentityInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the role to be assumed when multiple roles + // were received in the token from the identity provider. For example, a SAML-based + // identity provider. This parameter is optional for identity providers that + // do not support role customization. + CustomRoleArn *string `min:"20" type:"string"` + + // A unique identifier in the format REGION:GUID. + // + // IdentityId is a required field + IdentityId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + Logins map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetCredentialsForIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCredentialsForIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCredentialsForIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCredentialsForIdentityInput"} + if s.CustomRoleArn != nil && len(*s.CustomRoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("CustomRoleArn", 20)) + } + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomRoleArn sets the CustomRoleArn field's value. +func (s *GetCredentialsForIdentityInput) SetCustomRoleArn(v string) *GetCredentialsForIdentityInput { + s.CustomRoleArn = &v + return s +} + +// SetIdentityId sets the IdentityId field's value. +func (s *GetCredentialsForIdentityInput) SetIdentityId(v string) *GetCredentialsForIdentityInput { + s.IdentityId = &v + return s +} + +// SetLogins sets the Logins field's value. +func (s *GetCredentialsForIdentityInput) SetLogins(v map[string]*string) *GetCredentialsForIdentityInput { + s.Logins = v + return s +} + +// Returned in response to a successful GetCredentialsForIdentity operation. 
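+// As a sketch, the temporary credentials can be read from a successful response
+// (error handling abbreviated; the fields below may be nil and should be checked):
+//
+//    out, err := client.GetCredentialsForIdentity(input)
+//    if err == nil && out.Credentials != nil {
+//        fmt.Println(*out.Credentials.AccessKeyId, *out.Credentials.Expiration)
+//    }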
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetCredentialsForIdentityResponse +type GetCredentialsForIdentityOutput struct { + _ struct{} `type:"structure"` + + // Credentials for the provided identity ID. + Credentials *Credentials `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetCredentialsForIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCredentialsForIdentityOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetCredentialsForIdentityOutput) SetCredentials(v *Credentials) *GetCredentialsForIdentityOutput { + s.Credentials = v + return s +} + +// SetIdentityId sets the IdentityId field's value. +func (s *GetCredentialsForIdentityOutput) SetIdentityId(v string) *GetCredentialsForIdentityOutput { + s.IdentityId = &v + return s +} + +// Input to the GetId action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetIdInput +type GetIdInput struct { + _ struct{} `type:"structure"` + + // A standard AWS account ID (9+ digits). + AccountId *string `min:"1" type:"string"` + + // An identity pool ID in the format REGION:GUID. + // + // IdentityPoolId is a required field + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + // The available provider names for Logins are as follows: + // + // * Facebook: graph.facebook.com + // + // * Amazon Cognito Identity Provider: cognito-idp.us-east-1.amazonaws.com/us-east-1_123456789 + // + // * Google: accounts.google.com + // + // * Amazon: www.amazon.com + // + // * Twitter: api.twitter.com + // + // * Digits: www.digits.com + Logins map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdInput"} + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *GetIdInput) SetAccountId(v string) *GetIdInput { + s.AccountId = &v + return s +} + +// SetIdentityPoolId sets the IdentityPoolId field's value. +func (s *GetIdInput) SetIdentityPoolId(v string) *GetIdInput { + s.IdentityPoolId = &v + return s +} + +// SetLogins sets the Logins field's value. +func (s *GetIdInput) SetLogins(v map[string]*string) *GetIdInput { + s.Logins = v + return s +} + +// Returned in response to a GetId request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetIdResponse +type GetIdOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. 
+ IdentityId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetIdOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetIdOutput) GoString() string {
+ return s.String()
+}
+
+// SetIdentityId sets the IdentityId field's value.
+func (s *GetIdOutput) SetIdentityId(v string) *GetIdOutput {
+ s.IdentityId = &v
+ return s
+}
+
+// Input to the GetIdentityPoolRoles action.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetIdentityPoolRolesInput
+type GetIdentityPoolRolesInput struct {
+ _ struct{} `type:"structure"`
+
+ // An identity pool ID in the format REGION:GUID.
+ //
+ // IdentityPoolId is a required field
+ IdentityPoolId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetIdentityPoolRolesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetIdentityPoolRolesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetIdentityPoolRolesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetIdentityPoolRolesInput"}
+ if s.IdentityPoolId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+ }
+ if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIdentityPoolId sets the IdentityPoolId field's value.
+func (s *GetIdentityPoolRolesInput) SetIdentityPoolId(v string) *GetIdentityPoolRolesInput {
+ s.IdentityPoolId = &v
+ return s
+}
+
+// Returned in response to a successful GetIdentityPoolRoles operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetIdentityPoolRolesResponse
+type GetIdentityPoolRolesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An identity pool ID in the format REGION:GUID.
+ IdentityPoolId *string `min:"1" type:"string"`
+
+ // How users for a specific identity provider are mapped to roles. This is
+ // a String-to-RoleMapping object map. The string identifies the identity provider,
+ // for example, "graph.facebook.com" or "cognito-idp.us-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id".
+ RoleMappings map[string]*RoleMapping `type:"map"`
+
+ // The map of roles associated with this pool. Currently only authenticated
+ // and unauthenticated roles are supported.
+ Roles map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s GetIdentityPoolRolesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetIdentityPoolRolesOutput) GoString() string {
+ return s.String()
+}
+
+// SetIdentityPoolId sets the IdentityPoolId field's value.
+func (s *GetIdentityPoolRolesOutput) SetIdentityPoolId(v string) *GetIdentityPoolRolesOutput {
+ s.IdentityPoolId = &v
+ return s
+}
+
+// SetRoleMappings sets the RoleMappings field's value.
+func (s *GetIdentityPoolRolesOutput) SetRoleMappings(v map[string]*RoleMapping) *GetIdentityPoolRolesOutput {
+ s.RoleMappings = v
+ return s
+}
+
+// SetRoles sets the Roles field's value.
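+// As a sketch of the map's shape, keys are the role types described above and
+// values are role ARNs (the ARNs below are hypothetical placeholders):
+//
+//    out.SetRoles(map[string]*string{
+//        "authenticated":   aws.String("arn:aws:iam::123456789012:role/CognitoAuthExample"),
+//        "unauthenticated": aws.String("arn:aws:iam::123456789012:role/CognitoUnauthExample"),
+//    })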
+func (s *GetIdentityPoolRolesOutput) SetRoles(v map[string]*string) *GetIdentityPoolRolesOutput { + s.Roles = v + return s +} + +// Input to the GetOpenIdTokenForDeveloperIdentity action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetOpenIdTokenForDeveloperIdentityInput +type GetOpenIdTokenForDeveloperIdentityInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // An identity pool ID in the format REGION:GUID. + // + // IdentityPoolId is a required field + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + // Each name-value pair represents a user from a public provider or developer + // provider. If the user is from a developer provider, the name-value pair will + // follow the syntax "developer_provider_name": "developer_user_identifier". + // The developer provider is the "domain" by which Cognito will refer to your + // users; you provided this domain while creating/updating the identity pool. + // The developer user identifier is an identifier from your backend that uniquely + // identifies a user. When you create an identity pool, you can specify the + // supported logins. + // + // Logins is a required field + Logins map[string]*string `type:"map" required:"true"` + + // The expiration time of the token, in seconds. You can specify a custom expiration + // time for the token so that you can cache it. If you don't provide an expiration + // time, the token is valid for 15 minutes. You can exchange the token with + // Amazon STS for temporary AWS credentials, which are valid for a maximum of + // one hour. The maximum token duration you can set is 24 hours. You should + // take care in setting the expiration time for a token, as there are significant + // security implications: an attacker could use a leaked token to access your + // AWS resources for the token's duration. + TokenDuration *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOpenIdTokenForDeveloperIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOpenIdTokenForDeveloperIdentityInput"} + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + if s.Logins == nil { + invalidParams.Add(request.NewErrParamRequired("Logins")) + } + if s.TokenDuration != nil && *s.TokenDuration < 1 { + invalidParams.Add(request.NewErrParamMinValue("TokenDuration", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentityId sets the IdentityId field's value. +func (s *GetOpenIdTokenForDeveloperIdentityInput) SetIdentityId(v string) *GetOpenIdTokenForDeveloperIdentityInput { + s.IdentityId = &v + return s +} + +// SetIdentityPoolId sets the IdentityPoolId field's value. 
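+// As a broader sketch, a complete input for this operation might be assembled
+// as follows (the provider name, user identifier, and pool ID are hypothetical
+// placeholders):
+//
+//    input := &cognitoidentity.GetOpenIdTokenForDeveloperIdentityInput{
+//        IdentityPoolId: aws.String("us-east-1:11111111-1111-1111-1111-111111111111"),
+//        Logins: map[string]*string{
+//            "login.mycompany.example": aws.String("backend-user-1234"),
+//        },
+//        TokenDuration: aws.Int64(3600), // one hour, within the documented 24-hour maximum
+//    }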
+func (s *GetOpenIdTokenForDeveloperIdentityInput) SetIdentityPoolId(v string) *GetOpenIdTokenForDeveloperIdentityInput { + s.IdentityPoolId = &v + return s +} + +// SetLogins sets the Logins field's value. +func (s *GetOpenIdTokenForDeveloperIdentityInput) SetLogins(v map[string]*string) *GetOpenIdTokenForDeveloperIdentityInput { + s.Logins = v + return s +} + +// SetTokenDuration sets the TokenDuration field's value. +func (s *GetOpenIdTokenForDeveloperIdentityInput) SetTokenDuration(v int64) *GetOpenIdTokenForDeveloperIdentityInput { + s.TokenDuration = &v + return s +} + +// Returned in response to a successful GetOpenIdTokenForDeveloperIdentity request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetOpenIdTokenForDeveloperIdentityResponse +type GetOpenIdTokenForDeveloperIdentityOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // An OpenID token. + Token *string `type:"string"` +} + +// String returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityOutput) GoString() string { + return s.String() +} + +// SetIdentityId sets the IdentityId field's value. +func (s *GetOpenIdTokenForDeveloperIdentityOutput) SetIdentityId(v string) *GetOpenIdTokenForDeveloperIdentityOutput { + s.IdentityId = &v + return s +} + +// SetToken sets the Token field's value. +func (s *GetOpenIdTokenForDeveloperIdentityOutput) SetToken(v string) *GetOpenIdTokenForDeveloperIdentityOutput { + s.Token = &v + return s +} + +// Input to the GetOpenIdToken action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetOpenIdTokenInput +type GetOpenIdTokenInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + // + // IdentityId is a required field + IdentityId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + // When using graph.facebook.com and www.amazon.com, supply the access_token + // returned from the provider's authflow. For accounts.google.com, an Amazon + // Cognito Identity Provider, or any other OpenId Connect provider, always include + // the id_token. + Logins map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetOpenIdTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIdTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOpenIdTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOpenIdTokenInput"} + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentityId sets the IdentityId field's value. +func (s *GetOpenIdTokenInput) SetIdentityId(v string) *GetOpenIdTokenInput { + s.IdentityId = &v + return s +} + +// SetLogins sets the Logins field's value. 
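+// Per the provider guidance above, a sketch with placeholder token values:
+//
+//    input.SetLogins(map[string]*string{
+//        "graph.facebook.com":  aws.String("<facebook-access-token>"),
+//        "accounts.google.com": aws.String("<google-id-token>"),
+//    })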
+func (s *GetOpenIdTokenInput) SetLogins(v map[string]*string) *GetOpenIdTokenInput { + s.Logins = v + return s +} + +// Returned in response to a successful GetOpenIdToken request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetOpenIdTokenResponse +type GetOpenIdTokenOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. Note that the IdentityId returned + // may not match the one passed on input. + IdentityId *string `min:"1" type:"string"` + + // An OpenID token, valid for 15 minutes. + Token *string `type:"string"` +} + +// String returns the string representation +func (s GetOpenIdTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIdTokenOutput) GoString() string { + return s.String() +} + +// SetIdentityId sets the IdentityId field's value. +func (s *GetOpenIdTokenOutput) SetIdentityId(v string) *GetOpenIdTokenOutput { + s.IdentityId = &v + return s +} + +// SetToken sets the Token field's value. +func (s *GetOpenIdTokenOutput) SetToken(v string) *GetOpenIdTokenOutput { + s.Token = &v + return s +} + +// A description of the identity. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/IdentityDescription +type IdentityDescription struct { + _ struct{} `type:"structure"` + + // Date on which the identity was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // Date on which the identity was last modified. + LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A set of optional name-value pairs that map provider names to provider tokens. + Logins []*string `type:"list"` +} + +// String returns the string representation +func (s IdentityDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityDescription) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *IdentityDescription) SetCreationDate(v time.Time) *IdentityDescription { + s.CreationDate = &v + return s +} + +// SetIdentityId sets the IdentityId field's value. +func (s *IdentityDescription) SetIdentityId(v string) *IdentityDescription { + s.IdentityId = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *IdentityDescription) SetLastModifiedDate(v time.Time) *IdentityDescription { + s.LastModifiedDate = &v + return s +} + +// SetLogins sets the Logins field's value. +func (s *IdentityDescription) SetLogins(v []*string) *IdentityDescription { + s.Logins = v + return s +} + +// An object representing an Amazon Cognito identity pool. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/IdentityPool +type IdentityPool struct { + _ struct{} `type:"structure"` + + // TRUE if the identity pool supports unauthenticated logins. + // + // AllowUnauthenticatedIdentities is a required field + AllowUnauthenticatedIdentities *bool `type:"boolean" required:"true"` + + // A list representing an Amazon Cognito Identity User Pool and its client ID. + CognitoIdentityProviders []*Provider `type:"list"` + + // The "domain" by which Cognito will refer to your users. + DeveloperProviderName *string `min:"1" type:"string"` + + // An identity pool ID in the format REGION:GUID. 
+ //
+ // IdentityPoolId is a required field
+ IdentityPoolId *string `min:"1" type:"string" required:"true"`
+
+ // A string that you provide.
+ //
+ // IdentityPoolName is a required field
+ IdentityPoolName *string `min:"1" type:"string" required:"true"`
+
+ // A list of OpenID Connect provider ARNs.
+ OpenIdConnectProviderARNs []*string `type:"list"`
+
+ // An array of Amazon Resource Names (ARNs) of the SAML provider for your identity
+ // pool.
+ SamlProviderARNs []*string `type:"list"`
+
+ // Optional key:value pairs mapping provider names to provider app IDs.
+ SupportedLoginProviders map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s IdentityPool) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IdentityPool) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IdentityPool) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "IdentityPool"}
+ if s.AllowUnauthenticatedIdentities == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllowUnauthenticatedIdentities"))
+ }
+ if s.DeveloperProviderName != nil && len(*s.DeveloperProviderName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DeveloperProviderName", 1))
+ }
+ if s.IdentityPoolId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+ }
+ if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+ }
+ if s.IdentityPoolName == nil {
+ invalidParams.Add(request.NewErrParamRequired("IdentityPoolName"))
+ }
+ if s.IdentityPoolName != nil && len(*s.IdentityPoolName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("IdentityPoolName", 1))
+ }
+ if s.CognitoIdentityProviders != nil {
+ for i, v := range s.CognitoIdentityProviders {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CognitoIdentityProviders", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAllowUnauthenticatedIdentities sets the AllowUnauthenticatedIdentities field's value.
+func (s *IdentityPool) SetAllowUnauthenticatedIdentities(v bool) *IdentityPool {
+ s.AllowUnauthenticatedIdentities = &v
+ return s
+}
+
+// SetCognitoIdentityProviders sets the CognitoIdentityProviders field's value.
+func (s *IdentityPool) SetCognitoIdentityProviders(v []*Provider) *IdentityPool {
+ s.CognitoIdentityProviders = v
+ return s
+}
+
+// SetDeveloperProviderName sets the DeveloperProviderName field's value.
+func (s *IdentityPool) SetDeveloperProviderName(v string) *IdentityPool {
+ s.DeveloperProviderName = &v
+ return s
+}
+
+// SetIdentityPoolId sets the IdentityPoolId field's value.
+func (s *IdentityPool) SetIdentityPoolId(v string) *IdentityPool {
+ s.IdentityPoolId = &v
+ return s
+}
+
+// SetIdentityPoolName sets the IdentityPoolName field's value.
+func (s *IdentityPool) SetIdentityPoolName(v string) *IdentityPool {
+ s.IdentityPoolName = &v
+ return s
+}
+
+// SetOpenIdConnectProviderARNs sets the OpenIdConnectProviderARNs field's value.
+func (s *IdentityPool) SetOpenIdConnectProviderARNs(v []*string) *IdentityPool {
+ s.OpenIdConnectProviderARNs = v
+ return s
+}
+
+// SetSamlProviderARNs sets the SamlProviderARNs field's value.
+func (s *IdentityPool) SetSamlProviderARNs(v []*string) *IdentityPool { + s.SamlProviderARNs = v + return s +} + +// SetSupportedLoginProviders sets the SupportedLoginProviders field's value. +func (s *IdentityPool) SetSupportedLoginProviders(v map[string]*string) *IdentityPool { + s.SupportedLoginProviders = v + return s +} + +// A description of the identity pool. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/IdentityPoolShortDescription +type IdentityPoolShortDescription struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string"` + + // A string that you provide. + IdentityPoolName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s IdentityPoolShortDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityPoolShortDescription) GoString() string { + return s.String() +} + +// SetIdentityPoolId sets the IdentityPoolId field's value. +func (s *IdentityPoolShortDescription) SetIdentityPoolId(v string) *IdentityPoolShortDescription { + s.IdentityPoolId = &v + return s +} + +// SetIdentityPoolName sets the IdentityPoolName field's value. +func (s *IdentityPoolShortDescription) SetIdentityPoolName(v string) *IdentityPoolShortDescription { + s.IdentityPoolName = &v + return s +} + +// Input to the ListIdentities action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/ListIdentitiesInput +type ListIdentitiesInput struct { + _ struct{} `type:"structure"` + + // An optional boolean parameter that allows you to hide disabled identities. + // If omitted, the ListIdentities API will include disabled identities in the + // response. + HideDisabled *bool `type:"boolean"` + + // An identity pool ID in the format REGION:GUID. + // + // IdentityPoolId is a required field + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // The maximum number of identities to return. + // + // MaxResults is a required field + MaxResults *int64 `min:"1" type:"integer" required:"true"` + + // A pagination token. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListIdentitiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListIdentitiesInput"} + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + if s.MaxResults == nil { + invalidParams.Add(request.NewErrParamRequired("MaxResults")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHideDisabled sets the HideDisabled field's value. 
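+// As a sketch, a request that hides disabled identities (the pool ID and page
+// size below are placeholders):
+//
+//    input := (&cognitoidentity.ListIdentitiesInput{}).
+//        SetIdentityPoolId("us-east-1:11111111-1111-1111-1111-111111111111").
+//        SetMaxResults(10).
+//        SetHideDisabled(true)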
+func (s *ListIdentitiesInput) SetHideDisabled(v bool) *ListIdentitiesInput { + s.HideDisabled = &v + return s +} + +// SetIdentityPoolId sets the IdentityPoolId field's value. +func (s *ListIdentitiesInput) SetIdentityPoolId(v string) *ListIdentitiesInput { + s.IdentityPoolId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListIdentitiesInput) SetMaxResults(v int64) *ListIdentitiesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListIdentitiesInput) SetNextToken(v string) *ListIdentitiesInput { + s.NextToken = &v + return s +} + +// The response to a ListIdentities request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/ListIdentitiesResponse +type ListIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // An object containing a set of identities and associated mappings. + Identities []*IdentityDescription `type:"list"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string"` + + // A pagination token. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesOutput) GoString() string { + return s.String() +} + +// SetIdentities sets the Identities field's value. +func (s *ListIdentitiesOutput) SetIdentities(v []*IdentityDescription) *ListIdentitiesOutput { + s.Identities = v + return s +} + +// SetIdentityPoolId sets the IdentityPoolId field's value. +func (s *ListIdentitiesOutput) SetIdentityPoolId(v string) *ListIdentitiesOutput { + s.IdentityPoolId = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListIdentitiesOutput) SetNextToken(v string) *ListIdentitiesOutput { + s.NextToken = &v + return s +} + +// Input to the ListIdentityPools action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/ListIdentityPoolsInput +type ListIdentityPoolsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of identities to return. + // + // MaxResults is a required field + MaxResults *int64 `min:"1" type:"integer" required:"true"` + + // A pagination token. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListIdentityPoolsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoolsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListIdentityPoolsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListIdentityPoolsInput"} + if s.MaxResults == nil { + invalidParams.Add(request.NewErrParamRequired("MaxResults")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListIdentityPoolsInput) SetMaxResults(v int64) *ListIdentityPoolsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
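+// As a pagination sketch using the ListIdentityPools method defined earlier in
+// this file (the page size is a placeholder; error handling elided):
+//
+//    input := (&cognitoidentity.ListIdentityPoolsInput{}).SetMaxResults(25)
+//    for {
+//        out, err := client.ListIdentityPools(input)
+//        if err != nil {
+//            break
+//        }
+//        // ... process out.IdentityPools ...
+//        if out.NextToken == nil {
+//            break // no more pages
+//        }
+//        input.SetNextToken(*out.NextToken)
+//    }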
+func (s *ListIdentityPoolsInput) SetNextToken(v string) *ListIdentityPoolsInput {
+	s.NextToken = &v
+	return s
+}
+
+// The result of a successful ListIdentityPools action.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/ListIdentityPoolsResponse
+type ListIdentityPoolsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The identity pools returned by the ListIdentityPools action.
+	IdentityPools []*IdentityPoolShortDescription `type:"list"`
+
+	// A pagination token.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListIdentityPoolsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListIdentityPoolsOutput) GoString() string {
+	return s.String()
+}
+
+// SetIdentityPools sets the IdentityPools field's value.
+func (s *ListIdentityPoolsOutput) SetIdentityPools(v []*IdentityPoolShortDescription) *ListIdentityPoolsOutput {
+	s.IdentityPools = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListIdentityPoolsOutput) SetNextToken(v string) *ListIdentityPoolsOutput {
+	s.NextToken = &v
+	return s
+}
+
+// Input to the LookupDeveloperIdentity action.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/LookupDeveloperIdentityInput
+type LookupDeveloperIdentityInput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique ID used by your backend authentication process to identify a user.
+	// Typically, a developer identity provider would issue many developer user
+	// identifiers, in keeping with the number of users.
+	DeveloperUserIdentifier *string `min:"1" type:"string"`
+
+	// A unique identifier in the format REGION:GUID.
+	IdentityId *string `min:"1" type:"string"`
+
+	// An identity pool ID in the format REGION:GUID.
+	//
+	// IdentityPoolId is a required field
+	IdentityPoolId *string `min:"1" type:"string" required:"true"`
+
+	// The maximum number of identities to return.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// A pagination token. The first call you make will have NextToken set to null.
+	// After that the service will return NextToken values as needed. For example,
+	// let's say you make a request with MaxResults set to 10, and there are 20
+	// matches in the database. The service will return a pagination token as a
+	// part of the response. This token can be used to call the API again and get
+	// results starting from the 11th match.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s LookupDeveloperIdentityInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LookupDeveloperIdentityInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LookupDeveloperIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LookupDeveloperIdentityInput"} + if s.DeveloperUserIdentifier != nil && len(*s.DeveloperUserIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeveloperUserIdentifier", 1)) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeveloperUserIdentifier sets the DeveloperUserIdentifier field's value. +func (s *LookupDeveloperIdentityInput) SetDeveloperUserIdentifier(v string) *LookupDeveloperIdentityInput { + s.DeveloperUserIdentifier = &v + return s +} + +// SetIdentityId sets the IdentityId field's value. +func (s *LookupDeveloperIdentityInput) SetIdentityId(v string) *LookupDeveloperIdentityInput { + s.IdentityId = &v + return s +} + +// SetIdentityPoolId sets the IdentityPoolId field's value. +func (s *LookupDeveloperIdentityInput) SetIdentityPoolId(v string) *LookupDeveloperIdentityInput { + s.IdentityPoolId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *LookupDeveloperIdentityInput) SetMaxResults(v int64) *LookupDeveloperIdentityInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *LookupDeveloperIdentityInput) SetNextToken(v string) *LookupDeveloperIdentityInput { + s.NextToken = &v + return s +} + +// Returned in response to a successful LookupDeveloperIdentity action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/LookupDeveloperIdentityResponse +type LookupDeveloperIdentityOutput struct { + _ struct{} `type:"structure"` + + // This is the list of developer user identifiers associated with an identity + // ID. Cognito supports the association of multiple developer user identifiers + // with an identity ID. + DeveloperUserIdentifierList []*string `type:"list"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // A pagination token. The first call you make will have NextToken set to null. + // After that the service will return NextToken values as needed. For example, + // let's say you make a request with MaxResults set to 10, and there are 20 + // matches in the database. The service will return a pagination token as a + // part of the response. This token can be used to call the API again and get + // results starting from the 11th match. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LookupDeveloperIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LookupDeveloperIdentityOutput) GoString() string { + return s.String() +} + +// SetDeveloperUserIdentifierList sets the DeveloperUserIdentifierList field's value. 
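The NextToken walkthrough in the comment above (MaxResults of 10 against 20 matches) translates directly into a loop. A sketch only, not part of the patch, with placeholder identifiers:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	svc := cognitoidentity.New(session.Must(session.NewSession()))

	input := &cognitoidentity.LookupDeveloperIdentityInput{
		IdentityPoolId:          aws.String("us-east-1:00000000-0000-0000-0000-000000000000"), // placeholder
		DeveloperUserIdentifier: aws.String("backend-user-42"),                                // placeholder
		MaxResults:              aws.Int64(10),
	}
	var ids []*string
	for {
		out, err := svc.LookupDeveloperIdentity(input)
		if err != nil {
			log.Fatal(err)
		}
		ids = append(ids, out.DeveloperUserIdentifierList...)
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken // next call resumes at the 11th match
	}
	fmt.Println("found", len(ids), "developer user identifiers")
}
```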
+func (s *LookupDeveloperIdentityOutput) SetDeveloperUserIdentifierList(v []*string) *LookupDeveloperIdentityOutput { + s.DeveloperUserIdentifierList = v + return s +} + +// SetIdentityId sets the IdentityId field's value. +func (s *LookupDeveloperIdentityOutput) SetIdentityId(v string) *LookupDeveloperIdentityOutput { + s.IdentityId = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *LookupDeveloperIdentityOutput) SetNextToken(v string) *LookupDeveloperIdentityOutput { + s.NextToken = &v + return s +} + +// A rule that maps a claim name, a claim value, and a match type to a role +// ARN. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/MappingRule +type MappingRule struct { + _ struct{} `type:"structure"` + + // The claim name that must be present in the token, for example, "isAdmin" + // or "paid". + // + // Claim is a required field + Claim *string `min:"1" type:"string" required:"true"` + + // The match condition that specifies how closely the claim value in the IdP + // token must match Value. + // + // MatchType is a required field + MatchType *string `type:"string" required:"true" enum:"MappingRuleMatchType"` + + // The role ARN. + // + // RoleARN is a required field + RoleARN *string `min:"20" type:"string" required:"true"` + + // A brief string that the claim must match, for example, "paid" or "yes". + // + // Value is a required field + Value *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MappingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MappingRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MappingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MappingRule"} + if s.Claim == nil { + invalidParams.Add(request.NewErrParamRequired("Claim")) + } + if s.Claim != nil && len(*s.Claim) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Claim", 1)) + } + if s.MatchType == nil { + invalidParams.Add(request.NewErrParamRequired("MatchType")) + } + if s.RoleARN == nil { + invalidParams.Add(request.NewErrParamRequired("RoleARN")) + } + if s.RoleARN != nil && len(*s.RoleARN) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 20)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClaim sets the Claim field's value. +func (s *MappingRule) SetClaim(v string) *MappingRule { + s.Claim = &v + return s +} + +// SetMatchType sets the MatchType field's value. +func (s *MappingRule) SetMatchType(v string) *MappingRule { + s.MatchType = &v + return s +} + +// SetRoleARN sets the RoleARN field's value. +func (s *MappingRule) SetRoleARN(v string) *MappingRule { + s.RoleARN = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MappingRule) SetValue(v string) *MappingRule { + s.Value = &v + return s +} + +// Input to the MergeDeveloperIdentities action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/MergeDeveloperIdentitiesInput +type MergeDeveloperIdentitiesInput struct { + _ struct{} `type:"structure"` + + // User identifier for the destination user. 
The value should be a DeveloperUserIdentifier. + // + // DestinationUserIdentifier is a required field + DestinationUserIdentifier *string `min:"1" type:"string" required:"true"` + + // The "domain" by which Cognito will refer to your users. This is a (pseudo) + // domain name that you provide while creating an identity pool. This name acts + // as a placeholder that allows your backend and the Cognito service to communicate + // about the developer provider. For the DeveloperProviderName, you can use + // letters as well as period (.), underscore (_), and dash (-). + // + // DeveloperProviderName is a required field + DeveloperProviderName *string `min:"1" type:"string" required:"true"` + + // An identity pool ID in the format REGION:GUID. + // + // IdentityPoolId is a required field + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // User identifier for the source user. The value should be a DeveloperUserIdentifier. + // + // SourceUserIdentifier is a required field + SourceUserIdentifier *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MergeDeveloperIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeDeveloperIdentitiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MergeDeveloperIdentitiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MergeDeveloperIdentitiesInput"} + if s.DestinationUserIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationUserIdentifier")) + } + if s.DestinationUserIdentifier != nil && len(*s.DestinationUserIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationUserIdentifier", 1)) + } + if s.DeveloperProviderName == nil { + invalidParams.Add(request.NewErrParamRequired("DeveloperProviderName")) + } + if s.DeveloperProviderName != nil && len(*s.DeveloperProviderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeveloperProviderName", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + if s.SourceUserIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceUserIdentifier")) + } + if s.SourceUserIdentifier != nil && len(*s.SourceUserIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceUserIdentifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationUserIdentifier sets the DestinationUserIdentifier field's value. +func (s *MergeDeveloperIdentitiesInput) SetDestinationUserIdentifier(v string) *MergeDeveloperIdentitiesInput { + s.DestinationUserIdentifier = &v + return s +} + +// SetDeveloperProviderName sets the DeveloperProviderName field's value. +func (s *MergeDeveloperIdentitiesInput) SetDeveloperProviderName(v string) *MergeDeveloperIdentitiesInput { + s.DeveloperProviderName = &v + return s +} + +// SetIdentityPoolId sets the IdentityPoolId field's value. +func (s *MergeDeveloperIdentitiesInput) SetIdentityPoolId(v string) *MergeDeveloperIdentitiesInput { + s.IdentityPoolId = &v + return s +} + +// SetSourceUserIdentifier sets the SourceUserIdentifier field's value. 
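To make the source/destination terminology above concrete, here is a hedged sketch of a merge call; the provider name follows the letters/period/underscore/dash rule quoted in the field comment, and every identifier is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	svc := cognitoidentity.New(session.Must(session.NewSession()))

	out, err := svc.MergeDeveloperIdentities(&cognitoidentity.MergeDeveloperIdentitiesInput{
		DeveloperProviderName:     aws.String("login.my-company_backend"), // letters, '.', '_', '-' only
		IdentityPoolId:            aws.String("us-east-1:00000000-0000-0000-0000-000000000000"),
		SourceUserIdentifier:      aws.String("legacy-user-7"), // identity merged from
		DestinationUserIdentifier: aws.String("user-7"),        // identity that survives
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("merged into identity:", aws.StringValue(out.IdentityId))
}
```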
+func (s *MergeDeveloperIdentitiesInput) SetSourceUserIdentifier(v string) *MergeDeveloperIdentitiesInput { + s.SourceUserIdentifier = &v + return s +} + +// Returned in response to a successful MergeDeveloperIdentities action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/MergeDeveloperIdentitiesResponse +type MergeDeveloperIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MergeDeveloperIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeDeveloperIdentitiesOutput) GoString() string { + return s.String() +} + +// SetIdentityId sets the IdentityId field's value. +func (s *MergeDeveloperIdentitiesOutput) SetIdentityId(v string) *MergeDeveloperIdentitiesOutput { + s.IdentityId = &v + return s +} + +// A provider representing an Amazon Cognito Identity User Pool and its client +// ID. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/CognitoIdentityProvider +type Provider struct { + _ struct{} `type:"structure"` + + // The client ID for the Amazon Cognito Identity User Pool. + ClientId *string `min:"1" type:"string"` + + // The provider name for an Amazon Cognito Identity User Pool. For example, + // cognito-idp.us-east-1.amazonaws.com/us-east-1_123456789. + ProviderName *string `min:"1" type:"string"` + + // TRUE if server-side token validation is enabled for the identity provider’s + // token. + ServerSideTokenCheck *bool `type:"boolean"` +} + +// String returns the string representation +func (s Provider) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Provider) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Provider) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Provider"} + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.ProviderName != nil && len(*s.ProviderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProviderName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *Provider) SetClientId(v string) *Provider { + s.ClientId = &v + return s +} + +// SetProviderName sets the ProviderName field's value. +func (s *Provider) SetProviderName(v string) *Provider { + s.ProviderName = &v + return s +} + +// SetServerSideTokenCheck sets the ServerSideTokenCheck field's value. +func (s *Provider) SetServerSideTokenCheck(v bool) *Provider { + s.ServerSideTokenCheck = &v + return s +} + +// A role mapping. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/RoleMapping +type RoleMapping struct { + _ struct{} `type:"structure"` + + // If you specify Token or Rules as the Type, AmbiguousRoleResolution is required. + // + // Specifies the action to be taken if either no rules match the claim value + // for the Rules type, or there is no cognito:preferred_role claim and there + // are multiple cognito:roles matches for the Token type. + AmbiguousRoleResolution *string `type:"string" enum:"AmbiguousRoleResolutionType"` + + // The rules to be used for mapping users to roles. 
+ // + // If you specify Rules as the role mapping type, RulesConfiguration is required. + RulesConfiguration *RulesConfigurationType `type:"structure"` + + // The role mapping type. Token will use cognito:roles and cognito:preferred_role + // claims from the Cognito identity provider token to map groups to roles. Rules + // will attempt to match claims from the token to map to a role. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"RoleMappingType"` +} + +// String returns the string representation +func (s RoleMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleMapping) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RoleMapping) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoleMapping"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.RulesConfiguration != nil { + if err := s.RulesConfiguration.Validate(); err != nil { + invalidParams.AddNested("RulesConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAmbiguousRoleResolution sets the AmbiguousRoleResolution field's value. +func (s *RoleMapping) SetAmbiguousRoleResolution(v string) *RoleMapping { + s.AmbiguousRoleResolution = &v + return s +} + +// SetRulesConfiguration sets the RulesConfiguration field's value. +func (s *RoleMapping) SetRulesConfiguration(v *RulesConfigurationType) *RoleMapping { + s.RulesConfiguration = v + return s +} + +// SetType sets the Type field's value. +func (s *RoleMapping) SetType(v string) *RoleMapping { + s.Type = &v + return s +} + +// A container for rules. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/RulesConfigurationType +type RulesConfigurationType struct { + _ struct{} `type:"structure"` + + // An array of rules. You can specify up to 25 rules per identity provider. + // + // Rules are evaluated in order. The first one to match specifies the role. + // + // Rules is a required field + Rules []*MappingRule `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RulesConfigurationType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RulesConfigurationType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RulesConfigurationType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RulesConfigurationType"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil && len(s.Rules) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Rules", 1)) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *RulesConfigurationType) SetRules(v []*MappingRule) *RulesConfigurationType { + s.Rules = v + return s +} + +// Input to the SetIdentityPoolRoles action. 
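Since the rule model is now fully defined (claim, match type, value, role ARN; at most 25 rules, evaluated in order with the first match winning), a small sketch shows how the pieces snap together. The ARN and claim are invented for illustration:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	mapping := &cognitoidentity.RoleMapping{
		Type: aws.String(cognitoidentity.RoleMappingTypeRules),
		// Required whenever Type is Token or Rules; Deny rejects unmatched users.
		AmbiguousRoleResolution: aws.String(cognitoidentity.AmbiguousRoleResolutionTypeDeny),
		RulesConfiguration: &cognitoidentity.RulesConfigurationType{
			Rules: []*cognitoidentity.MappingRule{{
				Claim:     aws.String("isAdmin"), // example claim from the field docs
				MatchType: aws.String(cognitoidentity.MappingRuleMatchTypeEquals),
				Value:     aws.String("yes"),
				RoleARN:   aws.String("arn:aws:iam::123456789012:role/AdminRole"), // placeholder
			}},
		},
	}
	if err := mapping.Validate(); err != nil {
		log.Fatal(err)
	}
	log.Println("role mapping is structurally valid")
}
```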
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/SetIdentityPoolRolesInput
+type SetIdentityPoolRolesInput struct {
+	_ struct{} `type:"structure"`
+
+	// An identity pool ID in the format REGION:GUID.
+	//
+	// IdentityPoolId is a required field
+	IdentityPoolId *string `min:"1" type:"string" required:"true"`
+
+	// How users for a specific identity provider are mapped to roles. This is
+	// a string to RoleMapping object map. The string identifies the identity provider,
+	// for example, "graph.facebook.com" or "cognito-idp-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id".
+	//
+	// Up to 25 rules can be specified per identity provider.
+	RoleMappings map[string]*RoleMapping `type:"map"`
+
+	// The map of roles associated with this pool. For a given role, the key will
+	// be either "authenticated" or "unauthenticated" and the value will be the
+	// Role ARN.
+	//
+	// Roles is a required field
+	Roles map[string]*string `type:"map" required:"true"`
+}
+
+// String returns the string representation
+func (s SetIdentityPoolRolesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetIdentityPoolRolesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetIdentityPoolRolesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SetIdentityPoolRolesInput"}
+	if s.IdentityPoolId == nil {
+		invalidParams.Add(request.NewErrParamRequired("IdentityPoolId"))
+	}
+	if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1))
+	}
+	if s.Roles == nil {
+		invalidParams.Add(request.NewErrParamRequired("Roles"))
+	}
+	if s.RoleMappings != nil {
+		for i, v := range s.RoleMappings {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoleMappings", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetIdentityPoolId sets the IdentityPoolId field's value.
+func (s *SetIdentityPoolRolesInput) SetIdentityPoolId(v string) *SetIdentityPoolRolesInput {
+	s.IdentityPoolId = &v
+	return s
+}
+
+// SetRoleMappings sets the RoleMappings field's value.
+func (s *SetIdentityPoolRolesInput) SetRoleMappings(v map[string]*RoleMapping) *SetIdentityPoolRolesInput {
+	s.RoleMappings = v
+	return s
+}
+
+// SetRoles sets the Roles field's value.
+func (s *SetIdentityPoolRolesInput) SetRoles(v map[string]*string) *SetIdentityPoolRolesInput {
+	s.Roles = v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/SetIdentityPoolRolesOutput
+type SetIdentityPoolRolesOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetIdentityPoolRolesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetIdentityPoolRolesOutput) GoString() string {
+	return s.String()
+}
+
+// Input to the UnlinkDeveloperIdentity action.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/UnlinkDeveloperIdentityInput
+type UnlinkDeveloperIdentityInput struct {
+	_ struct{} `type:"structure"`
+
+	// The "domain" by which Cognito will refer to your users.
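Wiring the structures above into an actual SetIdentityPoolRoles call looks roughly like the following. The role ARNs are placeholders, and the map key reuses the provider-identifier example from the field comment:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	svc := cognitoidentity.New(session.Must(session.NewSession()))

	_, err := svc.SetIdentityPoolRoles(&cognitoidentity.SetIdentityPoolRolesInput{
		IdentityPoolId: aws.String("us-east-1:00000000-0000-0000-0000-000000000000"), // placeholder
		Roles: map[string]*string{ // keys must be "authenticated"/"unauthenticated"
			"authenticated":   aws.String("arn:aws:iam::123456789012:role/CognitoAuth"),
			"unauthenticated": aws.String("arn:aws:iam::123456789012:role/CognitoUnauth"),
		},
		RoleMappings: map[string]*cognitoidentity.RoleMapping{
			// Token maps cognito:roles / cognito:preferred_role claims to roles.
			"cognito-idp-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id": {
				Type:                    aws.String(cognitoidentity.RoleMappingTypeToken),
				AmbiguousRoleResolution: aws.String(cognitoidentity.AmbiguousRoleResolutionTypeAuthenticatedRole),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```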
+ // + // DeveloperProviderName is a required field + DeveloperProviderName *string `min:"1" type:"string" required:"true"` + + // A unique ID used by your backend authentication process to identify a user. + // + // DeveloperUserIdentifier is a required field + DeveloperUserIdentifier *string `min:"1" type:"string" required:"true"` + + // A unique identifier in the format REGION:GUID. + // + // IdentityId is a required field + IdentityId *string `min:"1" type:"string" required:"true"` + + // An identity pool ID in the format REGION:GUID. + // + // IdentityPoolId is a required field + IdentityPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UnlinkDeveloperIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkDeveloperIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UnlinkDeveloperIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UnlinkDeveloperIdentityInput"} + if s.DeveloperProviderName == nil { + invalidParams.Add(request.NewErrParamRequired("DeveloperProviderName")) + } + if s.DeveloperProviderName != nil && len(*s.DeveloperProviderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeveloperProviderName", 1)) + } + if s.DeveloperUserIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DeveloperUserIdentifier")) + } + if s.DeveloperUserIdentifier != nil && len(*s.DeveloperUserIdentifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeveloperUserIdentifier", 1)) + } + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.IdentityPoolId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityPoolId")) + } + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeveloperProviderName sets the DeveloperProviderName field's value. +func (s *UnlinkDeveloperIdentityInput) SetDeveloperProviderName(v string) *UnlinkDeveloperIdentityInput { + s.DeveloperProviderName = &v + return s +} + +// SetDeveloperUserIdentifier sets the DeveloperUserIdentifier field's value. +func (s *UnlinkDeveloperIdentityInput) SetDeveloperUserIdentifier(v string) *UnlinkDeveloperIdentityInput { + s.DeveloperUserIdentifier = &v + return s +} + +// SetIdentityId sets the IdentityId field's value. +func (s *UnlinkDeveloperIdentityInput) SetIdentityId(v string) *UnlinkDeveloperIdentityInput { + s.IdentityId = &v + return s +} + +// SetIdentityPoolId sets the IdentityPoolId field's value. 
+func (s *UnlinkDeveloperIdentityInput) SetIdentityPoolId(v string) *UnlinkDeveloperIdentityInput { + s.IdentityPoolId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/UnlinkDeveloperIdentityOutput +type UnlinkDeveloperIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnlinkDeveloperIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkDeveloperIdentityOutput) GoString() string { + return s.String() +} + +// Input to the UnlinkIdentity action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/UnlinkIdentityInput +type UnlinkIdentityInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + // + // IdentityId is a required field + IdentityId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + // + // Logins is a required field + Logins map[string]*string `type:"map" required:"true"` + + // Provider names to unlink from this identity. + // + // LoginsToRemove is a required field + LoginsToRemove []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UnlinkIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UnlinkIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UnlinkIdentityInput"} + if s.IdentityId == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityId")) + } + if s.IdentityId != nil && len(*s.IdentityId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityId", 1)) + } + if s.Logins == nil { + invalidParams.Add(request.NewErrParamRequired("Logins")) + } + if s.LoginsToRemove == nil { + invalidParams.Add(request.NewErrParamRequired("LoginsToRemove")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentityId sets the IdentityId field's value. +func (s *UnlinkIdentityInput) SetIdentityId(v string) *UnlinkIdentityInput { + s.IdentityId = &v + return s +} + +// SetLogins sets the Logins field's value. +func (s *UnlinkIdentityInput) SetLogins(v map[string]*string) *UnlinkIdentityInput { + s.Logins = v + return s +} + +// SetLoginsToRemove sets the LoginsToRemove field's value. +func (s *UnlinkIdentityInput) SetLoginsToRemove(v []*string) *UnlinkIdentityInput { + s.LoginsToRemove = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/UnlinkIdentityOutput +type UnlinkIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnlinkIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkIdentityOutput) GoString() string { + return s.String() +} + +// An array of UnprocessedIdentityId objects, each of which contains an ErrorCode +// and IdentityId. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/UnprocessedIdentityId +type UnprocessedIdentityId struct { + _ struct{} `type:"structure"` + + // The error code indicating the type of error that occurred. 
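For completeness, an UnlinkIdentity call per the field comments above: Logins carries the current provider tokens and LoginsToRemove names the providers to detach. A sketch with placeholders throughout:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	svc := cognitoidentity.New(session.Must(session.NewSession()))

	_, err := svc.UnlinkIdentity(&cognitoidentity.UnlinkIdentityInput{
		IdentityId: aws.String("us-east-1:11111111-1111-1111-1111-111111111111"), // placeholder
		Logins: map[string]*string{
			"graph.facebook.com": aws.String("<facebook-access-token>"), // placeholder token
		},
		LoginsToRemove: []*string{aws.String("graph.facebook.com")},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```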
+ ErrorCode *string `type:"string" enum:"ErrorCode"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UnprocessedIdentityId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnprocessedIdentityId) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *UnprocessedIdentityId) SetErrorCode(v string) *UnprocessedIdentityId { + s.ErrorCode = &v + return s +} + +// SetIdentityId sets the IdentityId field's value. +func (s *UnprocessedIdentityId) SetIdentityId(v string) *UnprocessedIdentityId { + s.IdentityId = &v + return s +} + +const ( + // AmbiguousRoleResolutionTypeAuthenticatedRole is a AmbiguousRoleResolutionType enum value + AmbiguousRoleResolutionTypeAuthenticatedRole = "AuthenticatedRole" + + // AmbiguousRoleResolutionTypeDeny is a AmbiguousRoleResolutionType enum value + AmbiguousRoleResolutionTypeDeny = "Deny" +) + +const ( + // ErrorCodeAccessDenied is a ErrorCode enum value + ErrorCodeAccessDenied = "AccessDenied" + + // ErrorCodeInternalServerError is a ErrorCode enum value + ErrorCodeInternalServerError = "InternalServerError" +) + +const ( + // MappingRuleMatchTypeEquals is a MappingRuleMatchType enum value + MappingRuleMatchTypeEquals = "Equals" + + // MappingRuleMatchTypeContains is a MappingRuleMatchType enum value + MappingRuleMatchTypeContains = "Contains" + + // MappingRuleMatchTypeStartsWith is a MappingRuleMatchType enum value + MappingRuleMatchTypeStartsWith = "StartsWith" + + // MappingRuleMatchTypeNotEqual is a MappingRuleMatchType enum value + MappingRuleMatchTypeNotEqual = "NotEqual" +) + +const ( + // RoleMappingTypeToken is a RoleMappingType enum value + RoleMappingTypeToken = "Token" + + // RoleMappingTypeRules is a RoleMappingType enum value + RoleMappingTypeRules = "Rules" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go new file mode 100644 index 000000000..4bf243c35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go @@ -0,0 +1,12 @@ +package cognitoidentity + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opGetOpenIdToken, opGetId, opGetCredentialsForIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/errors.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/errors.go new file mode 100644 index 000000000..9094d135b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/errors.go @@ -0,0 +1,77 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cognitoidentity + +const ( + + // ErrCodeConcurrentModificationException for service response error code + // "ConcurrentModificationException". + // + // Thrown if there are parallel requests to modify a resource. + ErrCodeConcurrentModificationException = "ConcurrentModificationException" + + // ErrCodeDeveloperUserAlreadyRegisteredException for service response error code + // "DeveloperUserAlreadyRegisteredException". + // + // The provided developer user identifier is already registered with Cognito + // under a different identity ID. 
+	ErrCodeDeveloperUserAlreadyRegisteredException = "DeveloperUserAlreadyRegisteredException"
+
+	// ErrCodeExternalServiceException for service response error code
+	// "ExternalServiceException".
+	//
+	// An exception thrown when a dependent service such as Facebook or Twitter
+	// is not responding.
+	ErrCodeExternalServiceException = "ExternalServiceException"
+
+	// ErrCodeInternalErrorException for service response error code
+	// "InternalErrorException".
+	//
+	// Thrown when the service encounters an error while processing the request.
+	ErrCodeInternalErrorException = "InternalErrorException"
+
+	// ErrCodeInvalidIdentityPoolConfigurationException for service response error code
+	// "InvalidIdentityPoolConfigurationException".
+	//
+	// Thrown if the identity pool has no role associated for the given auth type
+	// (auth/unauth) or if the AssumeRole fails.
+	ErrCodeInvalidIdentityPoolConfigurationException = "InvalidIdentityPoolConfigurationException"
+
+	// ErrCodeInvalidParameterException for service response error code
+	// "InvalidParameterException".
+	//
+	// Thrown for missing or bad input parameter(s).
+	ErrCodeInvalidParameterException = "InvalidParameterException"
+
+	// ErrCodeLimitExceededException for service response error code
+	// "LimitExceededException".
+	//
+	// Thrown when the total number of user pools has exceeded a preset limit.
+	ErrCodeLimitExceededException = "LimitExceededException"
+
+	// ErrCodeNotAuthorizedException for service response error code
+	// "NotAuthorizedException".
+	//
+	// Thrown when a user is not authorized to access the requested resource.
+	ErrCodeNotAuthorizedException = "NotAuthorizedException"
+
+	// ErrCodeResourceConflictException for service response error code
+	// "ResourceConflictException".
+	//
+	// Thrown when a user tries to use a login which is already linked to another
+	// account.
+	ErrCodeResourceConflictException = "ResourceConflictException"
+
+	// ErrCodeResourceNotFoundException for service response error code
+	// "ResourceNotFoundException".
+	//
+	// Thrown when the requested resource (for example, a dataset or record) does
+	// not exist.
+	ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+	// ErrCodeTooManyRequestsException for service response error code
+	// "TooManyRequestsException".
+	//
+	// Thrown when a request is throttled.
+	ErrCodeTooManyRequestsException = "TooManyRequestsException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go
new file mode 100644
index 000000000..8461c6bfb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go
@@ -0,0 +1,124 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package cognitoidentity
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Amazon Cognito is a web service that delivers scoped temporary credentials
+// to mobile devices and other untrusted environments. Amazon Cognito uniquely
+// identifies a device and supplies the user with a consistent identity over
+// the lifetime of an application.
+// +// Using Amazon Cognito, you can enable authentication with one or more third-party +// identity providers (Facebook, Google, or Login with Amazon), and you can +// also choose to support unauthenticated access from your app. Cognito delivers +// a unique identifier for each user and acts as an OpenID token provider trusted +// by AWS Security Token Service (STS) to access temporary, limited-privilege +// AWS credentials. +// +// To provide end-user credentials, first make an unsigned call to GetId. If +// the end user is authenticated with one of the supported identity providers, +// set the Logins map with the identity provider token. GetId returns a unique +// identifier for the user. +// +// Next, make an unsigned call to GetCredentialsForIdentity. This call expects +// the same Logins map as the GetId call, as well as the IdentityID originally +// returned by GetId. Assuming your identity pool has been configured via the +// SetIdentityPoolRoles operation, GetCredentialsForIdentity will return AWS +// credentials for your use. If your pool has not been configured with SetIdentityPoolRoles, +// or if you want to follow legacy flow, make an unsigned call to GetOpenIdToken, +// which returns the OpenID token necessary to call STS and retrieve AWS credentials. +// This call expects the same Logins map as the GetId call, as well as the IdentityID +// originally returned by GetId. The token returned by GetOpenIdToken can be +// passed to the STS operation AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html) +// to retrieve AWS credentials. +// +// If you want to use Amazon Cognito in an Android, iOS, or Unity application, +// you will probably want to make API calls via the AWS Mobile SDK. To learn +// more, see the AWS Mobile SDK Developer Guide (http://docs.aws.amazon.com/mobile/index.html). +// The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30 +type CognitoIdentity struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "cognito-identity" // Service endpoint prefix API calls made to. + EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. +) + +// New creates a new instance of the CognitoIdentity client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CognitoIdentity client from just a session. +// svc := cognitoidentity.New(mySession) +// +// // Create a CognitoIdentity client with additional configuration +// svc := cognitoidentity.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoIdentity { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
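The GetId / GetCredentialsForIdentity flow described in the package comment is worth seeing end to end. A minimal sketch, not part of the patch; the pool ID and the Google ID token are placeholders, and both calls go out unsigned per the customizations shown earlier:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := cognitoidentity.New(sess)

	// Step 1: exchange an identity provider token for a Cognito identity ID.
	idResp, err := svc.GetId(&cognitoidentity.GetIdInput{
		IdentityPoolId: aws.String("us-east-1:00000000-0000-0000-0000-000000000000"), // placeholder
		Logins: map[string]*string{
			"accounts.google.com": aws.String("<google-id-token>"), // placeholder
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: trade the identity ID (plus the same Logins map) for scoped,
	// temporary AWS credentials.
	credResp, err := svc.GetCredentialsForIdentity(&cognitoidentity.GetCredentialsForIdentityInput{
		IdentityId: idResp.IdentityId,
		Logins: map[string]*string{
			"accounts.google.com": aws.String("<google-id-token>"), // placeholder
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access key:", aws.StringValue(credResp.Credentials.AccessKeyId))
}
```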
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CognitoIdentity { + svc := &CognitoIdentity{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-06-30", + JSONVersion: "1.1", + TargetPrefix: "AWSCognitoIdentityService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CognitoIdentity operation and runs any +// custom request initialization. +func (c *CognitoIdentity) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/vendor.json b/vendor/vendor.json index e6ff9097c..988b6f8e9 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -830,6 +830,14 @@ "version": "v1.8.10", "versionExact": "v1.8.10" }, + { + "checksumSHA1": "L8F5aJdwCvoNwrP6prtHSdklijM=", + "path": "github.com/aws/aws-sdk-go/service/cognitoidentity", + "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", + "revisionTime": "2017-04-06T18:01:00Z", + "version": "=v1.8.10", + "versionExact": "v1.8.10" + }, { "checksumSHA1": "gSm1lj0J4klQMw7jHE0fU/RV+4Y=", "path": "github.com/aws/aws-sdk-go/service/configservice", From 9893abe95db95c3fecae0362c8f483c9c51de689 Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Wed, 19 Apr 2017 13:27:33 -0400 Subject: [PATCH 218/342] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e2a2ed51d..3ed4b4a29 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ IMPROVEMENTS: * provider/aws: Add an option to skip getting the supported EC2 platforms [GH-13672] * provider/aws: Add `name_prefix` support to `aws_cloudwatch_log_group` [GH-13273] * provider/azurerm: VM Scale Sets - import support [GH-13464] + * provider/azurerm: Allow Azure China region support [GH-13767] * provider/digitalocean: Export droplet prices [GH-13720] * provider/google: `google_compute_address` and `google_compute_global_address` are now importable [GH-13270] * provider/vault: `vault_generic_secret` resource can now optionally detect drift if it has appropriate access [GH-11776] From f5df4422aeb826234bf127fd65399d24f996532c Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 19 Apr 2017 20:55:37 +0300 Subject: [PATCH 219/342] provider/aws: Bump AWS SDK to 1.8.13 (#13775) --- vendor/github.com/aws/aws-sdk-go/CHANGELOG.md | 37 + .../aws/aws-sdk-go/aws/client/client.go | 6 +- .../aws-sdk-go/aws/credentials/credentials.go | 21 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 46 +- .../aws/aws-sdk-go/aws/request/retryer.go | 69 +- .../aws/request/serialization_error.go | 19 + .../aws/aws-sdk-go/aws/request/waiter.go | 12 +- .../aws/aws-sdk-go/aws/session/doc.go | 4 +- vendor/github.com/aws/aws-sdk-go/aws/url.go | 12 + .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 29 + 
.../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../aws/aws-sdk-go/service/apigateway/api.go | 951 ++++++++++++++++--
 .../aws/aws-sdk-go/service/lambda/api.go | 592 ++++++++++-
 .../aws/aws-sdk-go/service/opsworks/api.go | 933 +++++++++++++----
 .../aws-sdk-go/service/opsworks/service.go | 28 +-
 .../aws/aws-sdk-go/service/redshift/api.go | 307 ++++++
 .../aws/aws-sdk-go/service/redshift/errors.go | 7 +
 vendor/vendor.json | 654 ++++++------
 18 files changed, 3043 insertions(+), 686 deletions(-)
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/serialization_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go

diff --git a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md
index 3cb311671..257bf0a68 100644
--- a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md
@@ -1,3 +1,40 @@
+Release v1.8.13 (2017-04-18)
+===
+
+### Service Client Updates
+* `service/lambda`: Updates service API and documentation
+  * You can use tags to group and filter your Lambda functions, making it easier to analyze them for billing allocation purposes. For more information, see Tagging Lambda Functions. You can now write or upgrade your Lambda functions using Python version 3.6. For more information, see Programming Model for Authoring Lambda Functions in Python. Note: Features will be rolled out in the US regions on 4/19.
+
+### SDK Enhancements
+* `aws/request`: add support for appengine's custom standard library (#1190)
+  * Remove syscall error checking on appengine platforms.
+
+Release v1.8.12 (2017-04-11)
+===
+
+### Service Client Updates
+* `service/apigateway`: Updates service API and documentation
+  * API Gateway request validators
+* `service/batch`: Updates service API and documentation
+  * API Update for AWS Batch: Customer-provided AMI for MANAGED Compute Environment
+* `service/gamelift`: Updates service API and documentation
+  * Allows developers to utilize an improved workflow when calling our Queues API and introduces a new feature that allows developers to specify a maximum allowable latency per Queue.
+* `service/opsworks`: Updates service API, documentation, and paginators
+  * CloudWatch Logs agent configuration can now be attached to OpsWorks Layers using CreateLayer and UpdateLayer. OpsWorks will then automatically install and manage the CloudWatch Logs agent on the instances that are part of the OpsWorks Layer.
+
+### SDK Bugs
+* `aws/client`: Fix clients polluting handler list (#1197)
+  * Fixes the clients potentially polluting the passed-in handler list with the client's customizations. This change ensures every client always works with a clean copy of the request handlers and it cannot pollute the handlers back upstream.
+  * Fixes #1184
+* `aws/request`: Fix waiter error match condition (#1195)
+  * Fixes the waiter's matching overwriting the request's err, effectively ignoring the error condition. This broke waiters with the FailureWaiterState matcher state.
+Release v1.8.11 (2017-04-07)
+===
+
+### Service Client Updates
+* `service/redshift`: Updates service API, documentation, and paginators
+  * This update adds the GetClusterCredentials API which is used to get temporary login credentials to the cluster. AccountWithRestoreAccess now has a new member, AccountAlias, which is the identifier of the AWS support account authorized to restore the specified snapshot.
This is added to support the feature where the customer can share their snapshot with the Amazon Redshift Support Account without having to manually specify the AWS Redshift Service account ID on the AWS Console/API.
+
 Release v1.8.10 (2017-04-06)
 ===
 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
index 17fc76a0f..b2428c286 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -46,7 +46,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op
 	svc := &Client{
 		Config:     cfg,
 		ClientInfo: info,
-		Handlers:   handlers,
+		Handlers:   handlers.Copy(),
 	}
 
 	switch retryer, ok := cfg.Retryer.(request.Retryer); {
@@ -86,8 +86,8 @@ func (c *Client) AddDebugHandlers() {
 		return
 	}
 
-	c.Handlers.Send.PushFront(logRequest)
-	c.Handlers.Send.PushBack(logResponse)
+	c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
+	c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
 }
 
 const logReqMsg = `DEBUG: Request %s/%s Details:
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
index c29baf001..03630cf0d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -97,6 +97,27 @@ type Provider interface {
 	IsExpired() bool
 }
 
+// An ErrorProvider is a stub credentials provider that always returns an error.
+// It is used by the SDK when construction of a known provider is not possible
+// due to an error.
+type ErrorProvider struct {
+	// The error to be returned from Retrieve
+	Err error
+
+	// The provider name to set on the Value returned by Retrieve
+	ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+	return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+	return false
+}
+
 // A Expiry provides shared expiration logic to be used by credentials
 // providers to implement expiry functionality.
 //
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
index 110ca8367..07afe3b8e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -10,10 +10,12 @@ package defaults
 import (
 	"fmt"
 	"net/http"
+	"net/url"
 	"os"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/corehandlers"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
@@ -96,23 +98,51 @@ func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credenti
 	})
 }
 
-// RemoteCredProvider returns a credenitials provider for the default remote
+const (
+	httpProviderEnvVar     = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+	ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
 // endpoints such as EC2 or ECS Roles.
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } - if len(ecsCredURI) > 0 { - return ecsCredProvider(cfg, handlers, ecsCredURI) + if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("http://169.254.170.2%s", uri) + return httpCredProvider(cfg, handlers, u) } return ec2RoleProvider(cfg, handlers) } -func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider { - const host = `169.254.170.2` +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string - return endpointcreds.NewProviderClient(cfg, handlers, - fmt.Sprintf("http://%s%s", host, uri), + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") { + errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host) + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 632cd7099..bf07a1e9c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -1,9 +1,6 @@ package request import ( - "net" - "os" - "syscall" "time" "github.com/aws/aws-sdk-go/aws" @@ -82,46 +79,64 @@ func isSerializationErrorRetryable(err error) bool { return isCodeRetryable(aerr.Code()) } - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - return sysErr.Err == syscall.ECONNRESET - } - } - - return false + return isErrConnectionReset(err) } // IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -func (r *Request) IsErrorRetryable() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok && err.Code() != ErrCodeSerialization { - return isCodeRetryable(err.Code()) +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok && aerr.Code() != ErrCodeSerialization { + return isCodeRetryable(aerr.Code()) } else if ok { - return isSerializationErrorRetryable(err.OrigErr()) + return isSerializationErrorRetryable(aerr.OrigErr()) } } return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set -func (r *Request) IsErrorThrottle() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeThrottle(err.Code()) +// Returns false if error is nil. 
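A quick way to exercise the new endpoint-credentials plumbing: point AWS_CONTAINER_CREDENTIALS_FULL_URI at a non-loopback host, and RemoteCredProvider hands back the ErrorProvider stub, whose Retrieve surfaces the CredentialsEndpointError. A sketch against the exported defaults API:

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Only localhost/127.0.0.1 hosts are accepted for the full-URI variant,
	// so this address is rejected at provider-construction time.
	os.Setenv("AWS_CONTAINER_CREDENTIALS_FULL_URI", "http://169.254.170.2/creds")

	d := defaults.Get()
	provider := defaults.RemoteCredProvider(*d.Config, d.Handlers)

	if _, err := provider.Retrieve(); err != nil {
		fmt.Println(err) // CredentialsEndpointError: invalid host address ...
	}
}
```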
+func IsErrorThrottle(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeThrottle(aerr.Code()) } } return false } -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -func (r *Request) IsErrorExpired() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeExpiredCreds(err.Code()) +// IsErrorExpiredCreds returns whether the error code is a credential expiry error. +// Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeExpiredCreds(aerr.Code()) } } return false } + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + return IsErrorThrottle(r.Error) +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/serialization_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/serialization_error.go new file mode 100644 index 000000000..10fc8cb24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/serialization_error.go @@ -0,0 +1,19 @@ +// +build !appengine + +package request + +import ( + "net" + "os" + "syscall" +) + +func isErrConnectionReset(err error) bool { + if opErr, ok := err.(*net.OpError); ok { + if sysErr, ok := opErr.Err.(*os.SyscallError); ok { + return sysErr.Err == syscall.ECONNRESET + } + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go index 354c3812e..854b0854a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -178,14 +178,8 @@ func (w Waiter) WaitWithContext(ctx aws.Context) error { // See if any of the acceptors match the request's response, or error for _, a := range w.Acceptors { - var matched bool - matched, err = a.match(w.Name, w.Logger, req, err) - if err != nil { - // Error occurred during current waiter call - return err - } else if matched { - // Match was found can stop here and return - return nil + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr } } @@ -274,7 +268,7 @@ func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err erro return true, nil case FailureWaiterState: // Waiter failure state triggered - return false, awserr.New("ResourceNotReady", + return true, awserr.New(WaiterResourceNotReadyErrorCode, "failed waiting for successful resource state", err) case RetryWaiterState: // clear the error and retry the operation diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 660d9bef9..2fe35e74d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -23,7 +23,7 @@ additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
 Alternatively you can explicitly create a Session with shared config enabled.
 To do this you can use NewSessionWithOptions to configure how the Session will
 be created. Using the NewSessionWithOptions with SharedConfigState set to
-SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
 environment variable was set.
 
 Creating Sessions
@@ -84,7 +84,7 @@ override the shared config state (AWS_SDK_LOAD_CONFIG).
 
 	// Force enable Shared Config support
 	sess := session.Must(session.NewSessionWithOptions(session.Options{
-		SharedConfigState: SharedConfigEnable,
+		SharedConfigState: session.SharedConfigEnable,
 	}))
 
 Adding Handlers
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644
index 000000000..6192b2455
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+	return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644
index 000000000..0210d2720
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,28 @@
+// +build !go1.8
+
+package aws
+
+import (
+	"net/url"
+	"strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+	return stripPort(url.Host)
+}
+
+// stripPort is a copy of Go 1.8's url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index bf7714152..a58e54551 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.8.10"
+const SDKVersion = "1.8.13"
diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go
index dd18497a9..7d281f77f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go
@@ -696,6 +696,90 @@ func (c *APIGateway) CreateModelWithContext(ctx aws.Context, input *CreateModelI
 	return out, req.Send()
 }
 
+const opCreateRequestValidator = "CreateRequestValidator"
+
+// CreateRequestValidatorRequest generates a "aws/request.Request" representing the
+// client's request for the CreateRequestValidator operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See CreateRequestValidator for usage and error information.
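+//
+// A usage sketch, added for illustration; svc is an assumed *APIGateway client
+// and the identifier values are placeholders:
+//
+//    req, out := svc.CreateRequestValidatorRequest(&apigateway.CreateRequestValidatorInput{
+//        RestApiId:                 aws.String("restapi_id"),
+//        Name:                      aws.String("body-and-params"),
+//        ValidateRequestBody:       aws.Bool(true),
+//        ValidateRequestParameters: aws.Bool(true),
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(aws.StringValue(out.Id))
+//    }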
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateRequestValidator method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateRequestValidatorRequest method.
+//    req, resp := client.CreateRequestValidatorRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+func (c *APIGateway) CreateRequestValidatorRequest(input *CreateRequestValidatorInput) (req *request.Request, output *UpdateRequestValidatorOutput) {
+	op := &request.Operation{
+		Name:       opCreateRequestValidator,
+		HTTPMethod: "POST",
+		HTTPPath:   "/restapis/{restapi_id}/requestvalidators",
+	}
+
+	if input == nil {
+		input = &CreateRequestValidatorInput{}
+	}
+
+	output = &UpdateRequestValidatorOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateRequestValidator API operation for Amazon API Gateway.
+//
+// Creates a RequestValidator of a given RestApi.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon API Gateway's
+// API operation CreateRequestValidator for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeBadRequestException "BadRequestException"
+//
+//   * ErrCodeUnauthorizedException "UnauthorizedException"
+//
+//   * ErrCodeNotFoundException "NotFoundException"
+//
+//   * ErrCodeLimitExceededException "LimitExceededException"
+//
+//   * ErrCodeTooManyRequestsException "TooManyRequestsException"
+//
+func (c *APIGateway) CreateRequestValidator(input *CreateRequestValidatorInput) (*UpdateRequestValidatorOutput, error) {
+	req, out := c.CreateRequestValidatorRequest(input)
+	return out, req.Send()
+}
+
+// CreateRequestValidatorWithContext is the same as CreateRequestValidator with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateRequestValidator for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *APIGateway) CreateRequestValidatorWithContext(ctx aws.Context, input *CreateRequestValidatorInput, opts ...request.Option) (*UpdateRequestValidatorOutput, error) {
+	req, out := c.CreateRequestValidatorRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
 const opCreateResource = "CreateResource"
 
 // CreateResourceRequest generates a "aws/request.Request" representing the
@@ -2219,6 +2303,92 @@ func (c *APIGateway) DeleteModelWithContext(ctx aws.Context, input *DeleteModelI
 	return out, req.Send()
 }
 
+const opDeleteRequestValidator = "DeleteRequestValidator"
+
+// DeleteRequestValidatorRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteRequestValidator operation.
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteRequestValidator for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRequestValidator method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRequestValidatorRequest method. +// req, resp := client.DeleteRequestValidatorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *APIGateway) DeleteRequestValidatorRequest(input *DeleteRequestValidatorInput) (req *request.Request, output *DeleteRequestValidatorOutput) { + op := &request.Operation{ + Name: opDeleteRequestValidator, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/requestvalidators/{requestvalidator_id}", + } + + if input == nil { + input = &DeleteRequestValidatorInput{} + } + + output = &DeleteRequestValidatorOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteRequestValidator API operation for Amazon API Gateway. +// +// Deletes a RequestValidator of a given RestApi. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon API Gateway's +// API operation DeleteRequestValidator for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +// * ErrCodeBadRequestException "BadRequestException" +// +// * ErrCodeConflictException "ConflictException" +// +func (c *APIGateway) DeleteRequestValidator(input *DeleteRequestValidatorInput) (*DeleteRequestValidatorOutput, error) { + req, out := c.DeleteRequestValidatorRequest(input) + return out, req.Send() +} + +// DeleteRequestValidatorWithContext is the same as DeleteRequestValidator with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRequestValidator for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *APIGateway) DeleteRequestValidatorWithContext(ctx aws.Context, input *DeleteRequestValidatorInput, opts ...request.Option) (*DeleteRequestValidatorOutput, error) { + req, out := c.DeleteRequestValidatorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
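+	// The context is attached and per-call options are applied before Send
+	// executes the request; a nil context panics, as documented above.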
+ return out, req.Send() +} + const opDeleteResource = "DeleteResource" // DeleteResourceRequest generates a "aws/request.Request" representing the @@ -5244,6 +5414,168 @@ func (c *APIGateway) GetModelsPagesWithContext(ctx aws.Context, input *GetModels return p.Err() } +const opGetRequestValidator = "GetRequestValidator" + +// GetRequestValidatorRequest generates a "aws/request.Request" representing the +// client's request for the GetRequestValidator operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetRequestValidator for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRequestValidator method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRequestValidatorRequest method. +// req, resp := client.GetRequestValidatorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *APIGateway) GetRequestValidatorRequest(input *GetRequestValidatorInput) (req *request.Request, output *UpdateRequestValidatorOutput) { + op := &request.Operation{ + Name: opGetRequestValidator, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/requestvalidators/{requestvalidator_id}", + } + + if input == nil { + input = &GetRequestValidatorInput{} + } + + output = &UpdateRequestValidatorOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetRequestValidator API operation for Amazon API Gateway. +// +// Gets a RequestValidator of a given RestApi. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon API Gateway's +// API operation GetRequestValidator for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +func (c *APIGateway) GetRequestValidator(input *GetRequestValidatorInput) (*UpdateRequestValidatorOutput, error) { + req, out := c.GetRequestValidatorRequest(input) + return out, req.Send() +} + +// GetRequestValidatorWithContext is the same as GetRequestValidator with the addition of +// the ability to pass a context and additional request options. +// +// See GetRequestValidator for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *APIGateway) GetRequestValidatorWithContext(ctx aws.Context, input *GetRequestValidatorInput, opts ...request.Option) (*UpdateRequestValidatorOutput, error) { + req, out := c.GetRequestValidatorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetRequestValidators = "GetRequestValidators" + +// GetRequestValidatorsRequest generates a "aws/request.Request" representing the +// client's request for the GetRequestValidators operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetRequestValidators for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRequestValidators method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRequestValidatorsRequest method. +// req, resp := client.GetRequestValidatorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *APIGateway) GetRequestValidatorsRequest(input *GetRequestValidatorsInput) (req *request.Request, output *GetRequestValidatorsOutput) { + op := &request.Operation{ + Name: opGetRequestValidators, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/requestvalidators", + } + + if input == nil { + input = &GetRequestValidatorsInput{} + } + + output = &GetRequestValidatorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetRequestValidators API operation for Amazon API Gateway. +// +// Gets the RequestValidators collection of a given RestApi. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon API Gateway's +// API operation GetRequestValidators for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +func (c *APIGateway) GetRequestValidators(input *GetRequestValidatorsInput) (*GetRequestValidatorsOutput, error) { + req, out := c.GetRequestValidatorsRequest(input) + return out, req.Send() +} + +// GetRequestValidatorsWithContext is the same as GetRequestValidators with the addition of +// the ability to pass a context and additional request options. +// +// See GetRequestValidators for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *APIGateway) GetRequestValidatorsWithContext(ctx aws.Context, input *GetRequestValidatorsInput, opts ...request.Option) (*GetRequestValidatorsOutput, error) { + req, out := c.GetRequestValidatorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opGetResource = "GetResource" // GetResourceRequest generates a "aws/request.Request" representing the @@ -8679,6 +9011,88 @@ func (c *APIGateway) UpdateModelWithContext(ctx aws.Context, input *UpdateModelI return out, req.Send() } +const opUpdateRequestValidator = "UpdateRequestValidator" + +// UpdateRequestValidatorRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRequestValidator operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UpdateRequestValidator for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRequestValidator method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRequestValidatorRequest method. +// req, resp := client.UpdateRequestValidatorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *APIGateway) UpdateRequestValidatorRequest(input *UpdateRequestValidatorInput) (req *request.Request, output *UpdateRequestValidatorOutput) { + op := &request.Operation{ + Name: opUpdateRequestValidator, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/requestvalidators/{requestvalidator_id}", + } + + if input == nil { + input = &UpdateRequestValidatorInput{} + } + + output = &UpdateRequestValidatorOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateRequestValidator API operation for Amazon API Gateway. +// +// Updates a RequestValidator of a given RestApi. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon API Gateway's +// API operation UpdateRequestValidator for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnauthorizedException "UnauthorizedException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// * ErrCodeBadRequestException "BadRequestException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +func (c *APIGateway) UpdateRequestValidator(input *UpdateRequestValidatorInput) (*UpdateRequestValidatorOutput, error) { + req, out := c.UpdateRequestValidatorRequest(input) + return out, req.Send() +} + +// UpdateRequestValidatorWithContext is the same as UpdateRequestValidator with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRequestValidator for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
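+//
+// A usage sketch, added for illustration; svc is an assumed *APIGateway client
+// and the identifier values are placeholders:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//
+//    _, err := svc.UpdateRequestValidatorWithContext(ctx, &apigateway.UpdateRequestValidatorInput{
+//        RestApiId:          aws.String("restapi_id"),
+//        RequestValidatorId: aws.String("requestvalidator_id"),
+//    })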
+func (c *APIGateway) UpdateRequestValidatorWithContext(ctx aws.Context, input *UpdateRequestValidatorInput, opts ...request.Option) (*UpdateRequestValidatorOutput, error) {
+	req, out := c.UpdateRequestValidatorRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
 const opUpdateResource = "UpdateResource"
 
 // UpdateResourceRequest generates a "aws/request.Request" representing the
@@ -10278,6 +10692,75 @@ func (s *CreateModelInput) SetSchema(v string) *CreateModelInput {
 	return s
 }
 
+// Creates a RequestValidator of a given RestApi.
+type CreateRequestValidatorInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the to-be-created RequestValidator.
+	Name *string `locationName:"name" type:"string"`
+
+	// [Required] The identifier of the RestApi for which the RequestValidator is
+	// created.
+	//
+	// RestApiId is a required field
+	RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`
+
+	// A Boolean flag to indicate whether to validate the request body according
+	// to the configured model schema for the method (true) or not (false).
+	ValidateRequestBody *bool `locationName:"validateRequestBody" type:"boolean"`
+
+	// A Boolean flag to indicate whether to validate request parameters (true)
+	// or not (false).
+	ValidateRequestParameters *bool `locationName:"validateRequestParameters" type:"boolean"`
+}
+
+// String returns the string representation
+func (s CreateRequestValidatorInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRequestValidatorInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateRequestValidatorInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateRequestValidatorInput"}
+	if s.RestApiId == nil {
+		invalidParams.Add(request.NewErrParamRequired("RestApiId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetName sets the Name field's value.
+func (s *CreateRequestValidatorInput) SetName(v string) *CreateRequestValidatorInput {
+	s.Name = &v
+	return s
+}
+
+// SetRestApiId sets the RestApiId field's value.
+func (s *CreateRequestValidatorInput) SetRestApiId(v string) *CreateRequestValidatorInput {
+	s.RestApiId = &v
+	return s
+}
+
+// SetValidateRequestBody sets the ValidateRequestBody field's value.
+func (s *CreateRequestValidatorInput) SetValidateRequestBody(v bool) *CreateRequestValidatorInput {
+	s.ValidateRequestBody = &v
+	return s
+}
+
+// SetValidateRequestParameters sets the ValidateRequestParameters field's value.
+func (s *CreateRequestValidatorInput) SetValidateRequestParameters(v bool) *CreateRequestValidatorInput {
+	s.ValidateRequestParameters = &v
+	return s
+}
+
 // Requests Amazon API Gateway to create a Resource resource.
 type CreateResourceInput struct {
 	_ struct{} `type:"structure"`
@@ -11595,6 +12078,74 @@ func (s DeleteModelOutput) GoString() string {
 	return s.String()
 }
 
+// Deletes a specified RequestValidator of a given RestApi.
+type DeleteRequestValidatorInput struct {
+	_ struct{} `type:"structure"`
+
+	// [Required] The identifier of the RequestValidator to be deleted.
+	//
+	// RequestValidatorId is a required field
+	RequestValidatorId *string `location:"uri" locationName:"requestvalidator_id" type:"string" required:"true"`
+
+	// [Required] The identifier of the RestApi from which the given RequestValidator
+	// is deleted.
+ // + // RestApiId is a required field + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRequestValidatorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRequestValidatorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRequestValidatorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRequestValidatorInput"} + if s.RequestValidatorId == nil { + invalidParams.Add(request.NewErrParamRequired("RequestValidatorId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRequestValidatorId sets the RequestValidatorId field's value. +func (s *DeleteRequestValidatorInput) SetRequestValidatorId(v string) *DeleteRequestValidatorInput { + s.RequestValidatorId = &v + return s +} + +// SetRestApiId sets the RestApiId field's value. +func (s *DeleteRequestValidatorInput) SetRestApiId(v string) *DeleteRequestValidatorInput { + s.RestApiId = &v + return s +} + +type DeleteRequestValidatorOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRequestValidatorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRequestValidatorOutput) GoString() string { + return s.String() +} + // Request to delete a Resource. type DeleteResourceInput struct { _ struct{} `type:"structure"` @@ -12487,7 +13038,7 @@ type GetApiKeysInput struct { // The name of queried API keys. NameQuery *string `location:"querystring" locationName:"name" type:"string"` - // The position of the current ApiKeys resource to get information about. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` } @@ -12632,11 +13183,10 @@ func (s *GetAuthorizerInput) SetRestApiId(v string) *GetAuthorizerInput { type GetAuthorizersInput struct { _ struct{} `type:"structure"` - // Limit the number of Authorizer resources in the response. + // The maximum number of returned results per page. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // If not all Authorizer resources in the response were present, the position - // will specify where to start the next page of results. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` // The RestApi identifier for the Authorizers resource. @@ -12785,13 +13335,11 @@ type GetBasePathMappingsInput struct { // DomainName is a required field DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` - // The maximum number of BasePathMapping resources in the collection to get - // information about. The default limit is 25. It should be an integer between - // 1 - 500. + // The maximum number of returned results per page. The value is 25 by default + // and could be between 1 - 500. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // The position of the current BasePathMapping resource in the collection to - // get information about. + // The current pagination position in the paged result set. 
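+	// Pass the position value returned with the previous page of results to
+	// retrieve the next page.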
Position *string `location:"querystring" locationName:"position" type:"string"` } @@ -12914,13 +13462,11 @@ func (s *GetClientCertificateInput) SetClientCertificateId(v string) *GetClientC type GetClientCertificatesInput struct { _ struct{} `type:"structure"` - // The maximum number of ClientCertificate resources in the collection to get - // information about. The default limit is 25. It should be an integer between - // 1 - 500. + // The maximum number of returned results per page. The value is 25 by default + // and could be between 1 - 500. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // The position of the current ClientCertificate resource in the collection - // to get information about. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` } @@ -13039,12 +13585,11 @@ func (s *GetDeploymentInput) SetRestApiId(v string) *GetDeploymentInput { type GetDeploymentsInput struct { _ struct{} `type:"structure"` - // The maximum number of Deployment resources in the collection to get information - // about. The default limit is 25. It should be an integer between 1 - 500. + // The maximum number of returned results per page. The value is 25 by default + // and could be between 1 - 500. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // The position of the current Deployment resource in the collection to get - // information about. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` // The identifier of the RestApi resource for the collection of Deployment resources @@ -13198,7 +13743,7 @@ func (s *GetDocumentationPartInput) SetRestApiId(v string) *GetDocumentationPart type GetDocumentationPartsInput struct { _ struct{} `type:"structure"` - // The size of the paged results. + // The maximum number of returned results per page. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` // The name of API entities of the to-be-retrieved documentation parts. @@ -13207,8 +13752,7 @@ type GetDocumentationPartsInput struct { // The path of API entities of the to-be-retrieved documentation parts. Path *string `location:"querystring" locationName:"path" type:"string"` - // The position of the to-be-retrieved documentation part in the DocumentationParts - // collection. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` // [Required] The identifier of the API of the to-be-retrieved documentation @@ -13373,11 +13917,10 @@ func (s *GetDocumentationVersionInput) SetRestApiId(v string) *GetDocumentationV type GetDocumentationVersionsInput struct { _ struct{} `type:"structure"` - // The page size of the returned documentation versions. + // The maximum number of returned results per page. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // The position of the returned DocumentationVersion in the DocumentationVersions - // collection. + // The current pagination position in the paged result set. 
Position *string `location:"querystring" locationName:"position" type:"string"` // [Required] The identifier of an API of the to-be-retrieved documentation @@ -13510,11 +14053,11 @@ func (s *GetDomainNameInput) SetDomainName(v string) *GetDomainNameInput { type GetDomainNamesInput struct { _ struct{} `type:"structure"` - // The maximum number of DomainName resources in the collection to get information - // about. The default limit is 25. It should be an integer between 1 - 500. + // The maximum number of returned results per page. The value is 25 by default + // and could be between 1 - 500. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // The position of the current domain names to get information about. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` } @@ -14155,12 +14698,11 @@ func (s *GetModelTemplateOutput) SetValue(v string) *GetModelTemplateOutput { type GetModelsInput struct { _ struct{} `type:"structure"` - // The maximum number of models in the collection to get information about. - // The default limit is 25. It should be an integer between 1 - 500. + // The maximum number of returned results per page. The value is 25 by default + // and could be between 1 - 500. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // The position of the next set of results in the Models resource to get information - // about. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` // The RestApi identifier. @@ -14244,6 +14786,156 @@ func (s *GetModelsOutput) SetPosition(v string) *GetModelsOutput { return s } +// Gets a RequestValidator of a given RestApi. +type GetRequestValidatorInput struct { + _ struct{} `type:"structure"` + + // [Required] The identifier of the RequestValidator to be retrieved. + // + // RequestValidatorId is a required field + RequestValidatorId *string `location:"uri" locationName:"requestvalidator_id" type:"string" required:"true"` + + // [Required] The identifier of the RestApi to which the specified RequestValidator + // belongs. + // + // RestApiId is a required field + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRequestValidatorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRequestValidatorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRequestValidatorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRequestValidatorInput"} + if s.RequestValidatorId == nil { + invalidParams.Add(request.NewErrParamRequired("RequestValidatorId")) + } + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRequestValidatorId sets the RequestValidatorId field's value. +func (s *GetRequestValidatorInput) SetRequestValidatorId(v string) *GetRequestValidatorInput { + s.RequestValidatorId = &v + return s +} + +// SetRestApiId sets the RestApiId field's value. +func (s *GetRequestValidatorInput) SetRestApiId(v string) *GetRequestValidatorInput { + s.RestApiId = &v + return s +} + +// Gets the RequestValidators collection of a given RestApi. 
+type GetRequestValidatorsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of returned results per page. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The current pagination position in the paged result set. + Position *string `location:"querystring" locationName:"position" type:"string"` + + // [Required] The identifier of a RestApi to which the RequestValidators collection + // belongs. + // + // RestApiId is a required field + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRequestValidatorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRequestValidatorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRequestValidatorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRequestValidatorsInput"} + if s.RestApiId == nil { + invalidParams.Add(request.NewErrParamRequired("RestApiId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *GetRequestValidatorsInput) SetLimit(v int64) *GetRequestValidatorsInput { + s.Limit = &v + return s +} + +// SetPosition sets the Position field's value. +func (s *GetRequestValidatorsInput) SetPosition(v string) *GetRequestValidatorsInput { + s.Position = &v + return s +} + +// SetRestApiId sets the RestApiId field's value. +func (s *GetRequestValidatorsInput) SetRestApiId(v string) *GetRequestValidatorsInput { + s.RestApiId = &v + return s +} + +// A collection of RequestValidator resources of a given RestApi. +// +// In Swagger, the RequestValidators of an API is defined by the x-amazon-apigateway-request-validators +// (http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions.html#api-gateway-swagger-extensions-request-validators.html) +// extension. +// +// Enable Basic Request Validation in API Gateway (http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-method-request-validation.html) +type GetRequestValidatorsOutput struct { + _ struct{} `type:"structure"` + + // The current page of RequestValidator resources in the RequestValidators collection. + Items []*UpdateRequestValidatorOutput `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetRequestValidatorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRequestValidatorsOutput) GoString() string { + return s.String() +} + +// SetItems sets the Items field's value. +func (s *GetRequestValidatorsOutput) SetItems(v []*UpdateRequestValidatorOutput) *GetRequestValidatorsOutput { + s.Items = v + return s +} + +// SetPosition sets the Position field's value. +func (s *GetRequestValidatorsOutput) SetPosition(v string) *GetRequestValidatorsOutput { + s.Position = &v + return s +} + // Request to list information about a resource. type GetResourceInput struct { _ struct{} `type:"structure"` @@ -14301,12 +14993,11 @@ func (s *GetResourceInput) SetRestApiId(v string) *GetResourceInput { type GetResourcesInput struct { _ struct{} `type:"structure"` - // The maximum number of Resource resources in the collection to get information - // about. 
The default limit is 25. It should be an integer between 1 - 500. + // The maximum number of returned results per page. The value is 25 by default + // and could be between 1 - 500. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // The position of the next set of results in the current Resources resource - // to get information about. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` // The RestApi identifier for the Resource. @@ -14433,12 +15124,11 @@ func (s *GetRestApiInput) SetRestApiId(v string) *GetRestApiInput { type GetRestApisInput struct { _ struct{} `type:"structure"` - // The maximum number of RestApi resources in the collection to get information - // about. The default limit is 25. It should be an integer between 1 - 500. + // The maximum number of returned results per page. The value is 25 by default + // and could be between 1 - 500. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // The position of the current RestApis resource in the collection to get information - // about. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` } @@ -14664,10 +15354,10 @@ func (s *GetSdkTypeInput) SetId(v string) *GetSdkTypeInput { type GetSdkTypesInput struct { _ struct{} `type:"structure"` - // The maximum number of SdkType instances to be returned. + // The maximum number of returned results per page. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // The position of the last fetched element in the SdkTypes collection. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` } @@ -14866,10 +15556,10 @@ type GetUsageInput struct { // The Id of the API key associated with the resultant usage data. KeyId *string `location:"querystring" locationName:"keyId" type:"string"` - // The maximum number of results to be returned. + // The maximum number of returned results per page. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // Position + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` // The starting date (e.g., 2016-01-01) of the usage data. @@ -15047,15 +15737,13 @@ func (s *GetUsagePlanKeyInput) SetUsagePlanId(v string) *GetUsagePlanKeyInput { type GetUsagePlanKeysInput struct { _ struct{} `type:"structure"` - // A query parameter specifying the maximum number usage plan keys returned - // by the GET request. + // The maximum number of returned results per page. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` // A query parameter specifying the name of the to-be-returned usage plan keys. NameQuery *string `location:"querystring" locationName:"name" type:"string"` - // A query parameter specifying the zero-based index specifying the position - // of a usage plan key. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` // The Id of the UsagePlan resource representing the usage plan containing the @@ -15154,11 +15842,10 @@ type GetUsagePlansInput struct { // The identifier of the API key associated with the usage plans. 
KeyId *string `location:"querystring" locationName:"keyId" type:"string"` - // The number of UsagePlan resources to be returned as the result. + // The maximum number of returned results per page. Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` - // The zero-based array index specifying the position of the to-be-retrieved - // UsagePlan resource. + // The current pagination position in the paged result set. Position *string `location:"querystring" locationName:"position" type:"string"` } @@ -15864,7 +16551,9 @@ type Method struct { // method. ApiKeyRequired *bool `locationName:"apiKeyRequired" type:"boolean"` - // The method's authorization type. + // The method's authorization type. Valid values are NONE for open access, AWS_IAM + // for using AWS IAM permissions, CUSTOM for using a custom authorizer, or COGNITO_USER_POOLS + // for using a Cognito user pool. AuthorizationType *string `locationName:"authorizationType" type:"string"` // The identifier of an Authorizer to use on this method. The authorizationType @@ -15965,6 +16654,9 @@ type Method struct { // parameter names defined here are available in Integration to be mapped to // integration request parameters or templates. RequestParameters map[string]*bool `locationName:"requestParameters" type:"map"` + + // The identifier of a RequestValidator for request validation. + RequestValidatorId *string `locationName:"requestValidatorId" type:"string"` } // String returns the string representation @@ -16031,6 +16723,12 @@ func (s *Method) SetRequestParameters(v map[string]*bool) *Method { return s } +// SetRequestValidatorId sets the RequestValidatorId field's value. +func (s *Method) SetRequestValidatorId(v string) *Method { + s.RequestValidatorId = &v + return s +} + // Represents a method response of a given HTTP status code returned to the // client. The method response is passed from the back end through the associated // integration response that can be transformed using a mapping template. @@ -16243,7 +16941,9 @@ type MethodSnapshot struct { // Specifies whether the method requires a valid ApiKey. ApiKeyRequired *bool `locationName:"apiKeyRequired" type:"boolean"` - // Specifies the type of authorization used for the method. + // The method's authorization type. Valid values are NONE for open access, AWS_IAM + // for using AWS IAM permissions, CUSTOM for using a custom authorizer, or COGNITO_USER_POOLS + // for using a Cognito user pool. AuthorizationType *string `locationName:"authorizationType" type:"string"` } @@ -16370,7 +17070,10 @@ type PatchOperation struct { // op operation can have only one path associated with it. Path *string `locationName:"path" type:"string"` - // The new target value of the update operation. + // The new target value of the update operation. When using AWS CLI to update + // a property of a JSON value, enclose the JSON object with a pair of single + // quotes in a Linux shell, e.g., '{"a": ...}'. In a Windows shell, see Using + // JSON for Parameters (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json). Value *string `locationName:"value" type:"string"` } @@ -16751,7 +17454,9 @@ type PutMethodInput struct { // Specifies whether the method required a valid ApiKey. ApiKeyRequired *bool `locationName:"apiKeyRequired" type:"boolean"` - // Specifies the type of authorization used for the method. + // The method's authorization type. 
Valid values are NONE for open access, AWS_IAM
+	// for using AWS IAM permissions, CUSTOM for using a custom authorizer, or COGNITO_USER_POOLS
+	// for using a Cognito user pool.
 	//
 	// AuthorizationType is a required field
 	AuthorizationType *string `locationName:"authorizationType" type:"string" required:"true"`
@@ -16785,6 +17490,9 @@ type PutMethodInput struct {
 	// integration request parameters or body-mapping templates.
 	RequestParameters map[string]*bool `locationName:"requestParameters" type:"map"`
 
+	// The identifier of a RequestValidator for validating the method request.
+	RequestValidatorId *string `locationName:"requestValidatorId" type:"string"`
+
 	// The Resource identifier for the new Method resource.
 	//
 	// ResourceId is a required field
@@ -16870,6 +17578,12 @@ func (s *PutMethodInput) SetRequestParameters(v map[string]*bool) *PutMethodInpu
 	return s
 }
 
+// SetRequestValidatorId sets the RequestValidatorId field's value.
+func (s *PutMethodInput) SetRequestValidatorId(v string) *PutMethodInput {
+	s.RequestValidatorId = &v
+	return s
+}
+
 // SetResourceId sets the ResourceId field's value.
 func (s *PutMethodInput) SetResourceId(v string) *PutMethodInput {
 	s.ResourceId = &v
@@ -17151,8 +17865,8 @@ type Resource struct {
 	// Request
 	//
 	// GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET HTTP/1.1 Content-Type:
-	// application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160608T031827Z
-	// Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160608/us-east-1/apigateway/aws4_request,
+	// application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20170223T031827Z
+	// Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20170223/us-east-1/apigateway/aws4_request,
 	// SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash}
 	// Response
 	//
@@ -18883,6 +19597,131 @@ func (s *UpdateModelInput) SetRestApiId(v string) *UpdateModelInput {
 	return s
 }
 
+// Updates a RequestValidator of a given RestApi.
+type UpdateRequestValidatorInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of update operations to be applied to the specified resource and in
+	// the order specified in this list.
+	PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"`
+
+	// [Required] The identifier of the RequestValidator to be updated.
+	//
+	// RequestValidatorId is a required field
+	RequestValidatorId *string `location:"uri" locationName:"requestvalidator_id" type:"string" required:"true"`
+
+	// [Required] The identifier of the RestApi for which the given RequestValidator
+	// is updated.
+	//
+	// RestApiId is a required field
+	RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateRequestValidatorInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRequestValidatorInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateRequestValidatorInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateRequestValidatorInput"}
+	if s.RequestValidatorId == nil {
+		invalidParams.Add(request.NewErrParamRequired("RequestValidatorId"))
+	}
+	if s.RestApiId == nil {
+		invalidParams.Add(request.NewErrParamRequired("RestApiId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetPatchOperations sets the PatchOperations field's value.
+func (s *UpdateRequestValidatorInput) SetPatchOperations(v []*PatchOperation) *UpdateRequestValidatorInput {
+	s.PatchOperations = v
+	return s
+}
+
+// SetRequestValidatorId sets the RequestValidatorId field's value.
+func (s *UpdateRequestValidatorInput) SetRequestValidatorId(v string) *UpdateRequestValidatorInput {
+	s.RequestValidatorId = &v
+	return s
+}
+
+// SetRestApiId sets the RestApiId field's value.
+func (s *UpdateRequestValidatorInput) SetRestApiId(v string) *UpdateRequestValidatorInput {
+	s.RestApiId = &v
+	return s
+}
+
+// A set of validation rules for incoming Method requests.
+//
+// In Swagger, a RequestValidator of an API is defined by the x-amazon-apigateway-request-validators.requestValidator
+// (http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions.html#api-gateway-swagger-extensions-request-validators.requestValidator.html)
+// object. It is referenced using the x-amazon-apigateway-request-validator
+// (http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions.html#api-gateway-swagger-extensions-request-validator)
+// property.
+//
+// Enable Basic Request Validation in API Gateway (http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-method-request-validation.html)
+type UpdateRequestValidatorOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of this RequestValidator.
+	Id *string `locationName:"id" type:"string"`
+
+	// The name of this RequestValidator.
+	Name *string `locationName:"name" type:"string"`
+
+	// A Boolean flag to indicate whether to validate a request body according to
+	// the configured Model schema.
+	ValidateRequestBody *bool `locationName:"validateRequestBody" type:"boolean"`
+
+	// A Boolean flag to indicate whether to validate request parameters (true)
+	// or not (false).
+	ValidateRequestParameters *bool `locationName:"validateRequestParameters" type:"boolean"`
+}
+
+// String returns the string representation
+func (s UpdateRequestValidatorOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateRequestValidatorOutput) GoString() string {
+	return s.String()
+}
+
+// SetId sets the Id field's value.
+func (s *UpdateRequestValidatorOutput) SetId(v string) *UpdateRequestValidatorOutput {
+	s.Id = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *UpdateRequestValidatorOutput) SetName(v string) *UpdateRequestValidatorOutput {
+	s.Name = &v
+	return s
+}
+
+// SetValidateRequestBody sets the ValidateRequestBody field's value.
+func (s *UpdateRequestValidatorOutput) SetValidateRequestBody(v bool) *UpdateRequestValidatorOutput {
+	s.ValidateRequestBody = &v
+	return s
+}
+
+// SetValidateRequestParameters sets the ValidateRequestParameters field's value.
+func (s *UpdateRequestValidatorOutput) SetValidateRequestParameters(v bool) *UpdateRequestValidatorOutput {
+	s.ValidateRequestParameters = &v
+	return s
+}
+
 // Request to change information about a Resource resource.
type UpdateResourceInput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go index 23e0ecd6c..2189b20a3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go @@ -1239,8 +1239,6 @@ func (c *Lambda) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, // the version or alias name using the Qualifier parameter. For more information // about versioning, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). // -// For information about adding permissions, see AddPermission. -// // You need permission for the lambda:GetPolicy action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1925,6 +1923,95 @@ func (c *Lambda) ListFunctionsPagesWithContext(ctx aws.Context, input *ListFunct return p.Err() } +const opListTags = "ListTags" + +// ListTagsRequest generates a "aws/request.Request" representing the +// client's request for the ListTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListTags for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsRequest method. +// req, resp := client.ListTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *Lambda) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { + op := &request.Operation{ + Name: opListTags, + HTTPMethod: "GET", + HTTPPath: "/2017-03-31/tags/{ARN}", + } + + if input == nil { + input = &ListTagsInput{} + } + + output = &ListTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTags API operation for AWS Lambda. +// +// Returns a list of tags assigned to a function when supplied the function +// ARN (Amazon Resource Name). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Lambda's +// API operation ListTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceException "ServiceException" +// The AWS Lambda service encountered an internal error. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource (for example, a Lambda function or access policy statement) +// specified in the request does not exist. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One of the parameters in the request is invalid. For example, if you provided +// an IAM role for AWS Lambda to assume in the CreateFunction or the UpdateFunctionConfiguration +// API, that AWS Lambda is unable to assume you will get this exception. 
+// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +func (c *Lambda) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { + req, out := c.ListTagsRequest(input) + return out, req.Send() +} + +// ListTagsWithContext is the same as ListTags with the addition of +// the ability to pass a context and additional request options. +// +// See ListTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lambda) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, opts ...request.Option) (*ListTagsOutput, error) { + req, out := c.ListTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListVersionsByFunction = "ListVersionsByFunction" // ListVersionsByFunctionRequest generates a "aws/request.Request" representing the @@ -2211,6 +2298,189 @@ func (c *Lambda) RemovePermissionWithContext(ctx aws.Context, input *RemovePermi return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See TagResource for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TagResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *Lambda) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/2017-03-31/tags/{ARN}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS Lambda. +// +// Creates a list of tags (key-value pairs) on the Lambda function. Requires +// the Lambda function ARN (Amazon Resource Name). If a key is specified without +// a value, Lambda creates a tag with the specified key and a value of null. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Lambda's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceException "ServiceException" +// The AWS Lambda service encountered an internal error. 
+// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource (for example, a Lambda function or access policy statement) +// specified in the request does not exist. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One of the parameters in the request is invalid. For example, if you provided +// an IAM role for AWS Lambda to assume in the CreateFunction or the UpdateFunctionConfiguration +// API, that AWS Lambda is unable to assume you will get this exception. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +func (c *Lambda) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lambda) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UntagResource for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UntagResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *Lambda) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/2017-03-31/tags/{ARN}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS Lambda. +// +// Removes tags from a Lambda function. Requires the function ARN (Amazon Resource +// Name). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Lambda's +// API operation UntagResource for usage and error information. 
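A minimal sketch of calling the TagResource operation defined above. The ARN and tag values are placeholders; the session setup is assumed. TagResourceOutput carries no fields, so only the error is checked.

```
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// aws.StringMap converts a plain map into the map[string]*string the API expects.
	_, err := svc.TagResource(&lambda.TagResourceInput{
		Resource: aws.String("arn:aws:lambda:us-west-2:123456789012:function:example"), // placeholder ARN
		Tags: aws.StringMap(map[string]string{
			"Environment": "staging",
			"Team":        "platform",
		}),
	})
	if err != nil {
		log.Fatalf("TagResource: %v", err)
	}
}
```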
+// +// Returned Error Codes: +// * ErrCodeServiceException "ServiceException" +// The AWS Lambda service encountered an internal error. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource (for example, a Lambda function or access policy statement) +// specified in the request does not exist. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValueException" +// One of the parameters in the request is invalid. For example, if you provided +// an IAM role for AWS Lambda to assume in the CreateFunction or the UpdateFunctionConfiguration +// API, that AWS Lambda is unable to assume you will get this exception. +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +func (c *Lambda) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lambda) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateAlias = "UpdateAlias" // UpdateAliasRequest generates a "aws/request.Request" representing the @@ -2627,8 +2897,8 @@ type AccountLimit struct { // The default limit is 100. ConcurrentExecutions *int64 `type:"integer"` - // Maximum size, in megabytes, of a code package you can upload per region. - // The default size is 75 GB. + // Maximum size, in bytes, of a code package you can upload per region. The + // default size is 75 GB. TotalCodeSize *int64 `type:"long"` } @@ -2722,7 +2992,7 @@ type AddPermissionInput struct { // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). // AWS Lambda also allows you to specify partial ARN (for example, account-id:Thumbnail). // Note that the length constraint applies only to the ARN. If you specify only - // the function name, it is limited to 64 character in length. + // the function name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -2756,24 +3026,23 @@ type AddPermissionInput struct { // arn:aws:lambda:aws-region:acct-id:function:function-name Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` - // This parameter is used for S3, SES, CloudWatch Logs and CloudWatch Rules - // only. The AWS account ID (without a hyphen) of the source owner. For example, - // if the SourceArn identifies a bucket, then this is the bucket owner's account - // ID. You can use this additional condition to ensure the bucket you specify - // is owned by a specific account (it is possible the bucket owner deleted the - // bucket and some other AWS account created the bucket). 
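To round out the tagging operations, a hypothetical sketch of UntagResource, which removes tags by key. The ARN and keys are placeholders; per the operation definition above, TagKeys is serialized as a query-string list.

```
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// TagKeys is sent as a query-string list; aws.StringSlice builds the []*string.
	_, err := svc.UntagResource(&lambda.UntagResourceInput{
		Resource: aws.String("arn:aws:lambda:us-west-2:123456789012:function:example"), // placeholder ARN
		TagKeys:  aws.StringSlice([]string{"Environment", "Team"}),
	})
	if err != nil {
		log.Fatalf("UntagResource: %v", err)
	}
}
```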
You can also use this - // condition to specify all sources (that is, you don't specify the SourceArn) - // owned by a specific account. + // This parameter is used for S3 and SES. The AWS account ID (without a hyphen) + // of the source owner. For example, if the SourceArn identifies a bucket, then + // this is the bucket owner's account ID. You can use this additional condition + // to ensure the bucket you specify is owned by a specific account (it is possible + // the bucket owner deleted the bucket and some other AWS account created the + // bucket). You can also use this condition to specify all sources (that is, + // you don't specify the SourceArn) owned by a specific account. SourceAccount *string `type:"string"` - // This is optional; however, when granting Amazon S3 permission to invoke your + // This is optional; however, when granting a source permission to invoke your // function, you should specify this field with the Amazon Resource Name (ARN) // as its value. This ensures that only events generated from the specified // source can invoke the function. // - // If you add a permission for the Amazon S3 principal without providing the - // source ARN, any AWS account that creates a mapping to your function ARN can - // send events to invoke your Lambda function from Amazon S3. + // If you add a permission for the source without providing the source ARN, + // any AWS account that creates a mapping to your function ARN can send events + // to invoke your Lambda function from that source. SourceArn *string `type:"string"` // A unique statement identifier. @@ -2955,7 +3224,9 @@ type CreateAliasInput struct { // Description of the alias. Description *string `type:"string"` - // Name of the Lambda function for which you want to create an alias. + // Name of the Lambda function for which you want to create an alias. Note that + // the length constraint applies only to the ARN. If you specify only the function + // name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -3068,7 +3339,7 @@ type CreateEventSourceMappingInput struct { // ID qualifier (for example, account-id:Thumbnail). // // Note that the length constraint applies only to the ARN. If you specify only - // the function name, it is limited to 64 character in length. + // the function name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `min:"1" type:"string" required:"true"` @@ -3182,7 +3453,8 @@ type CreateFunctionInput struct { // The name you want to assign to the function you are uploading. The function // names appear in the console and are returned in the ListFunctions API. Function // names are used to specify functions to other AWS Lambda API operations, such - // as Invoke. + // as Invoke. Note that the length constraint applies only to the ARN. If you + // specify only the function name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `min:"1" type:"string" required:"true"` @@ -3221,17 +3493,22 @@ type CreateFunctionInput struct { // The runtime environment for the Lambda function you are uploading. // - // To use the Node.js runtime v4.3, set the value to "nodejs4.3". To use earlier - // runtime (v0.10.42), set the value to "nodejs". + // To use the Python runtime v3.6, set the value to "python3.6". To use the + // Python runtime v2.7, set the value to "python2.7". 
To use the Node.js runtime + // v6.10, set the value to "nodejs6.10". To use the Node.js runtime v4.3, set + // the value to "nodejs4.3". // // You can no longer create functions using the v0.10.42 runtime version as // of November, 2016. Existing functions will be supported until early 2017, - // but we recommend you migrate them to nodejs4.3 runtime version as soon as - // possible. + // but we recommend you migrate them to either nodejs6.10 or nodejs4.3 runtime + // version as soon as possible. // // Runtime is a required field Runtime *string `type:"string" required:"true" enum:"Runtime"` + // The list of tags (key-value pairs) assigned to the new function. + Tags map[string]*string `type:"map"` + // The function execution time at which Lambda should terminate the function. // Because the execution time has cost implications, we recommend you set this // value based on your expected execution time. The default is 3 seconds. @@ -3359,6 +3636,12 @@ func (s *CreateFunctionInput) SetRuntime(v string) *CreateFunctionInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateFunctionInput) SetTags(v map[string]*string) *CreateFunctionInput { + s.Tags = v + return s +} + // SetTimeout sets the Timeout field's value. func (s *CreateFunctionInput) SetTimeout(v int64) *CreateFunctionInput { s.Timeout = &v @@ -3401,7 +3684,9 @@ type DeleteAliasInput struct { _ struct{} `type:"structure"` // The Lambda function name for which the alias is created. Deleting an alias - // does not delete the function version to which it is pointing. + // does not delete the function version to which it is pointing. Note that the + // length constraint applies only to the ARN. If you specify only the function + // name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -3520,7 +3805,7 @@ type DeleteFunctionInput struct { // Lambda also allows you to specify only the function name with the account // ID qualifier (for example, account-id:Thumbnail). Note that the length constraint // applies only to the ARN. If you specify only the function name, it is limited - // to 64 character in length. + // to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -3919,7 +4204,9 @@ type FunctionConfiguration struct { // The Amazon Resource Name (ARN) assigned to the function. FunctionArn *string `type:"string"` - // The name of the function. + // The name of the function. Note that the length constraint applies only to + // the ARN. If you specify only the function name, it is limited to 64 characters + // in length. FunctionName *string `min:"1" type:"string"` // The function Lambda calls to begin executing your function. @@ -3942,9 +4229,6 @@ type FunctionConfiguration struct { Role *string `type:"string"` // The runtime environment for the Lambda function. - // - // To use the Node.js runtime v4.3, set the value to "nodejs4.3". To use earlier - // runtime (v0.10.42), set the value to "nodejs". Runtime *string `type:"string" enum:"Runtime"` // The function execution time at which Lambda should terminate the function. @@ -4118,7 +4402,9 @@ type GetAliasInput struct { // Function name for which the alias is created. An alias is a subresource that // exists only in the context of an existing Lambda function so you must specify - // the function name. 
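Since CreateFunctionInput now accepts a Tags map, tags can be applied at creation time and read back later via ListTags. A sketch under stated assumptions: the zip file path, function name, and role ARN are hypothetical, and the session setup is assumed.

```
package main

import (
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// handler.zip, the role ARN, and the function name are placeholders.
	zip, err := ioutil.ReadFile("handler.zip")
	if err != nil {
		log.Fatal(err)
	}

	out, err := svc.CreateFunction(&lambda.CreateFunctionInput{
		FunctionName: aws.String("example"),
		Runtime:      aws.String("python3.6"),
		Handler:      aws.String("handler.main"),
		Role:         aws.String("arn:aws:iam::123456789012:role/lambda-exec"),
		Code:         &lambda.FunctionCode{ZipFile: zip},
		// Tags are applied at creation time and can be read back via ListTags.
		Tags: aws.StringMap(map[string]string{"Environment": "staging"}),
	})
	if err != nil {
		log.Fatalf("CreateFunction: %v", err)
	}
	log.Printf("created %s", aws.StringValue(out.FunctionArn))
}
```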
+ // the function name. Note that the length constraint applies only to the ARN. + // If you specify only the function name, it is limited to 64 characters in + // length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -4221,7 +4507,7 @@ type GetFunctionConfigurationInput struct { // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). // Note that the length constraint applies only to the ARN. If you specify only - // the function name, it is limited to 64 character in length. + // the function name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -4287,7 +4573,7 @@ type GetFunctionInput struct { // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). // Note that the length constraint applies only to the ARN. If you specify only - // the function name, it is limited to 64 character in length. + // the function name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -4352,6 +4638,9 @@ type GetFunctionOutput struct { // A complex type that describes function metadata. Configuration *FunctionConfiguration `type:"structure"` + + // Returns the list of tags associated with the function. + Tags map[string]*string `type:"map"` } // String returns the string representation @@ -4376,6 +4665,12 @@ func (s *GetFunctionOutput) SetConfiguration(v *FunctionConfiguration) *GetFunct return s } +// SetTags sets the Tags field's value. +func (s *GetFunctionOutput) SetTags(v map[string]*string) *GetFunctionOutput { + s.Tags = v + return s +} + type GetPolicyInput struct { _ struct{} `type:"structure"` @@ -4388,7 +4683,7 @@ type GetPolicyInput struct { // Lambda also allows you to specify only the function name with the account // ID qualifier (for example, account-id:Thumbnail). Note that the length constraint // applies only to the ARN. If you specify only the function name, it is limited - // to 64 character in length. + // to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -4469,7 +4764,9 @@ func (s *GetPolicyOutput) SetPolicy(v string) *GetPolicyOutput { type InvokeAsyncInput struct { _ struct{} `deprecated:"true" type:"structure" payload:"InvokeArgs"` - // The Lambda function name. + // The Lambda function name. Note that the length constraint applies only to + // the ARN. If you specify only the function name, it is limited to 64 characters + // in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -4563,7 +4860,7 @@ type InvokeInput struct { // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). // Note that the length constraint applies only to the ARN. 
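GetFunctionOutput now carries the function's tags alongside its configuration, so a single GetFunction call can retrieve both. A minimal sketch; the function name is a placeholder and the session is assumed.

```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// "example" is a placeholder; a name, partial ARN, or full ARN is accepted.
	out, err := svc.GetFunction(&lambda.GetFunctionInput{
		FunctionName: aws.String("example"),
	})
	if err != nil {
		log.Fatalf("GetFunction: %v", err)
	}

	fmt.Println("runtime:", aws.StringValue(out.Configuration.Runtime))
	for k, v := range out.Tags {
		fmt.Printf("tag %s = %s\n", k, aws.StringValue(v))
	}
}
```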
If you specify only - // the function name, it is limited to 64 character in length. + // the function name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -4731,7 +5028,9 @@ func (s *InvokeOutput) SetStatusCode(v int64) *InvokeOutput { type ListAliasesInput struct { _ struct{} `type:"structure"` - // Lambda function name for which the alias is created. + // Lambda function name for which the alias is created. Note that the length + // constraint applies only to the ARN. If you specify only the function name, + // it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -4854,7 +5153,7 @@ type ListEventSourceMappingsInput struct { // Lambda also allows you to specify only the function name with the account // ID qualifier (for example, account-id:Thumbnail). Note that the length constraint // applies only to the ARN. If you specify only the function name, it is limited - // to 64 character in length. + // to 64 characters in length. FunctionName *string `location:"querystring" locationName:"FunctionName" min:"1" type:"string"` // Optional string. An opaque pagination token returned from a previous ListEventSourceMappings @@ -5030,6 +5329,67 @@ func (s *ListFunctionsOutput) SetNextMarker(v string) *ListFunctionsOutput { return s } +type ListTagsInput struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) of the function. + // + // Resource is a required field + Resource *string `location:"uri" locationName:"ARN" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsInput"} + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResource sets the Resource field's value. +func (s *ListTagsInput) SetResource(v string) *ListTagsInput { + s.Resource = &v + return s +} + +type ListTagsOutput struct { + _ struct{} `type:"structure"` + + // The list of tags assigned to the function. + Tags map[string]*string `type:"map"` +} + +// String returns the string representation +func (s ListTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsOutput) SetTags(v map[string]*string) *ListTagsOutput { + s.Tags = v + return s +} + type ListVersionsByFunctionInput struct { _ struct{} `type:"structure"` @@ -5038,7 +5398,7 @@ type ListVersionsByFunctionInput struct { // function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). // Note that the length constraint applies only to the ARN. If you specify only - // the function name, it is limited to 64 character in length. 
+ // the function name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -5149,7 +5509,7 @@ type PublishVersionInput struct { // arn:aws:lambda:us-west-2:account-id:function:ThumbNail). AWS Lambda also // allows you to specify a partial ARN (for example, account-id:Thumbnail). // Note that the length constraint applies only to the ARN. If you specify only - // the function name, it is limited to 64 character in length. + // the function name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -5208,7 +5568,7 @@ type RemovePermissionInput struct { // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). // Note that the length constraint applies only to the ARN. If you specify only - // the function name, it is limited to 64 character in length. + // the function name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -5292,13 +5652,147 @@ func (s RemovePermissionOutput) GoString() string { return s.String() } +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) of the Lambda function. + // + // Resource is a required field + Resource *string `location:"uri" locationName:"ARN" type:"string" required:"true"` + + // The list of tags (key-value pairs) you are assigning to the Lambda function. + // + // Tags is a required field + Tags map[string]*string `type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResource sets the Resource field's value. +func (s *TagResourceInput) SetResource(v string) *TagResourceInput { + s.Resource = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) of the function. + // + // Resource is a required field + Resource *string `location:"uri" locationName:"ARN" type:"string" required:"true"` + + // The list of tag keys to be deleted from the function. 
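The generated Set* helpers on the new input types allow fluent construction, and the Validate methods report missing required fields client-side; the SDK runs the same check automatically before sending a request. A small sketch with a placeholder ARN:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	// Fluent construction via the generated setters.
	input := (&lambda.TagResourceInput{}).
		SetResource("arn:aws:lambda:us-west-2:123456789012:function:example"). // placeholder ARN
		SetTags(aws.StringMap(map[string]string{"Owner": "platform"}))

	// Validate reports missing required fields without making a network call.
	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	fmt.Println("input is valid")
}
```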
+ // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResource sets the Resource field's value. +func (s *UntagResourceInput) SetResource(v string) *UntagResourceInput { + s.Resource = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateAliasInput struct { _ struct{} `type:"structure"` // You can change the description of the alias using this parameter. Description *string `type:"string"` - // The function name for which the alias is created. + // The function name for which the alias is created. Note that the length constraint + // applies only to the ARN. If you specify only the function name, it is limited + // to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -5388,6 +5882,8 @@ type UpdateEventSourceMappingInput struct { // You can specify a function name (for example, Thumbnail) or you can specify // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 characters in length. // // If you are using versioning, you can also provide a qualified function ARN // (ARN that is qualified with function version or alias name as suffix). For @@ -5466,7 +5962,7 @@ type UpdateFunctionCodeInput struct { // Amazon Resource Name (ARN) of the function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail). // AWS Lambda also allows you to specify a partial ARN (for example, account-id:Thumbnail). // Note that the length constraint applies only to the ARN. If you specify only - // the function name, it is limited to 64 character in length. + // the function name, it is limited to 64 characters in length. // // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` @@ -5616,8 +6112,11 @@ type UpdateFunctionConfigurationInput struct { // The runtime environment for the Lambda function. // - // To use the Node.js runtime v4.3, set the value to "nodejs4.3". 
To use earlier - // runtime (v0.10.42), set the value to "nodejs". + // To use the Python runtime v3.6, set the value to "python3.6". To use the + // Python runtime v2.7, set the value to "python2.7". To use the Node.js runtime + // v6.10, set the value to "nodejs6.10". To use the Node.js runtime v4.3, set + // the value to "nodejs4.3". To use the Python runtime v3.6, set the value to + // "python3.6". To use the Python runtime v2.7, set the value to "python2.7". // // You can no longer downgrade to the v0.10.42 runtime version. This version // will no longer be supported as of early 2017. @@ -5857,6 +6356,9 @@ const ( // RuntimePython27 is a Runtime enum value RuntimePython27 = "python2.7" + // RuntimePython36 is a Runtime enum value + RuntimePython36 = "python3.6" + // RuntimeDotnetcore10 is a Runtime enum value RuntimeDotnetcore10 = "dotnetcore1.0" diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go index 430a492f0..e415f79e8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go @@ -67,7 +67,7 @@ func (c *OpsWorks) AssignInstanceRequest(input *AssignInstanceInput) (req *reque // * You can assign registered Amazon EC2 instances only to custom layers. // // * You cannot use this action with instances that were created with AWS -// OpsWorks. +// OpsWorks Stacks. // // Required Permissions: To use this action, an AWS Identity and Access Management // (IAM) user must have a Manage permissions level for the stack or an attached @@ -1698,7 +1698,7 @@ func (c *OpsWorks) DeregisterInstanceRequest(input *DeregisterInstanceInput) (re // // Deregister a registered Amazon EC2 or on-premises instance. This action removes // the instance from the stack and returns it to your control. This action can -// not be used with instances that were created with AWS OpsWorks. +// not be used with instances that were created with AWS OpsWorks Stacks. // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants @@ -1967,9 +1967,9 @@ func (c *OpsWorks) DescribeAgentVersionsRequest(input *DescribeAgentVersionsInpu // DescribeAgentVersions API operation for AWS OpsWorks. // -// Describes the available AWS OpsWorks agent versions. You must specify a stack -// ID or a configuration manager. DescribeAgentVersions returns a list of available -// agent versions for the specified stack or configuration manager. +// Describes the available AWS OpsWorks Stacks agent versions. You must specify +// a stack ID or a configuration manager. DescribeAgentVersions returns a list +// of available agent versions for the specified stack or configuration manager. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2054,7 +2054,7 @@ func (c *OpsWorks) DescribeAppsRequest(input *DescribeAppsInput) (req *request.R // // Requests a description of a specified set of apps. // -// You must specify at least one of the parameters. +// This call accepts only one resource-identifying parameter. 
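Referring back to the RuntimePython36 enum value added in the Lambda hunk above, migrating an existing function to the new runtime is a single UpdateFunctionConfiguration call. A hypothetical sketch; the function name and session are assumptions. Using the enum constant avoids typos in the raw runtime string.

```
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// Move a function (name is a placeholder) to the python3.6 runtime using
	// the RuntimePython36 enum value instead of a raw string literal.
	out, err := svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
		FunctionName: aws.String("example"),
		Runtime:      aws.String(lambda.RuntimePython36),
	})
	if err != nil {
		log.Fatalf("UpdateFunctionConfiguration: %v", err)
	}
	log.Printf("runtime is now %s", aws.StringValue(out.Runtime))
}
```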
// // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly @@ -2144,7 +2144,7 @@ func (c *OpsWorks) DescribeCommandsRequest(input *DescribeCommandsInput) (req *r // // Describes the results of specified commands. // -// You must specify at least one of the parameters. +// This call accepts only one resource-identifying parameter. // // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly @@ -2234,7 +2234,7 @@ func (c *OpsWorks) DescribeDeploymentsRequest(input *DescribeDeploymentsInput) ( // // Requests a description of a specified set of deployments. // -// You must specify at least one of the parameters. +// This call accepts only one resource-identifying parameter. // // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly @@ -2330,14 +2330,16 @@ func (c *OpsWorks) DescribeEcsClustersRequest(input *DescribeEcsClustersInput) ( // // Describes Amazon ECS clusters that are registered with a stack. If you specify // only a stack ID, you can use the MaxResults and NextToken parameters to paginate -// the response. However, AWS OpsWorks currently supports only one cluster per -// layer, so the result set has a maximum of one element. +// the response. However, AWS OpsWorks Stacks currently supports only one cluster +// per layer, so the result set has a maximum of one element. // // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack or an attached policy that explicitly // grants permission. For more information on user permissions, see Managing // User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // +// This call accepts only one resource-identifying parameter. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2471,7 +2473,7 @@ func (c *OpsWorks) DescribeElasticIpsRequest(input *DescribeElasticIpsInput) (re // // Describes Elastic IP addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html). // -// You must specify at least one of the parameters. +// This call accepts only one resource-identifying parameter. // // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly @@ -2561,7 +2563,7 @@ func (c *OpsWorks) DescribeElasticLoadBalancersRequest(input *DescribeElasticLoa // // Describes a stack's Elastic Load Balancing instances. // -// You must specify at least one of the parameters. +// This call accepts only one resource-identifying parameter. // // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly @@ -2651,7 +2653,7 @@ func (c *OpsWorks) DescribeInstancesRequest(input *DescribeInstancesInput) (req // // Requests a description of a set of instances. // -// You must specify at least one of the parameters. +// This call accepts only one resource-identifying parameter. 
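The "only one resource-identifying parameter" wording introduced across these Describe* hunks means a caller picks exactly one identifier per request. A sketch for DescribeInstances under stated assumptions: the stack ID is a placeholder and the session is assumed.

```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession()))

	// Exactly one identifier: StackId here. Setting LayerId or InstanceIds in
	// the same call would conflict with the "only one resource-identifying
	// parameter" rule described above. The stack ID is a placeholder.
	out, err := svc.DescribeInstances(&opsworks.DescribeInstancesInput{
		StackId: aws.String("2f18-stack-id-placeholder"),
	})
	if err != nil {
		log.Fatalf("DescribeInstances: %v", err)
	}
	for _, inst := range out.Instances {
		fmt.Printf("%s (%s)\n", aws.StringValue(inst.Hostname), aws.StringValue(inst.Status))
	}
}
```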
// // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly @@ -2741,7 +2743,7 @@ func (c *OpsWorks) DescribeLayersRequest(input *DescribeLayersInput) (req *reque // // Requests a description of one or more layers in a specified stack. // -// You must specify at least one of the parameters. +// This call accepts only one resource-identifying parameter. // // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly @@ -3088,7 +3090,7 @@ func (c *OpsWorks) DescribeRaidArraysRequest(input *DescribeRaidArraysInput) (re // // Describe an instance's RAID arrays. // -// You must specify at least one of the parameters. +// This call accepts only one resource-identifying parameter. // // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly @@ -3183,6 +3185,8 @@ func (c *OpsWorks) DescribeRdsDbInstancesRequest(input *DescribeRdsDbInstancesIn // grants permissions. For more information on user permissions, see Managing // User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // +// This call accepts only one resource-identifying parameter. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3264,13 +3268,15 @@ func (c *OpsWorks) DescribeServiceErrorsRequest(input *DescribeServiceErrorsInpu // DescribeServiceErrors API operation for AWS OpsWorks. // -// Describes AWS OpsWorks service errors. +// Describes AWS OpsWorks Stacks service errors. // // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information on user permissions, see Managing // User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). // +// This call accepts only one resource-identifying parameter. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3796,7 +3802,7 @@ func (c *OpsWorks) DescribeVolumesRequest(input *DescribeVolumesInput) (req *req // // Describes an instance's Amazon EBS volumes. // -// You must specify at least one of the parameters. +// This call accepts only one resource-identifying parameter. // // Required Permissions: To use this action, an IAM user must have a Show, Deploy, // or Manage permissions level for the stack, or an attached policy that explicitly @@ -4509,15 +4515,21 @@ func (c *OpsWorks) RegisterInstanceRequest(input *RegisterInstanceInput) (req *r // RegisterInstance API operation for AWS OpsWorks. // -// Registers instances with a specified stack that were created outside of AWS -// OpsWorks. +// Registers instances that were created outside of AWS OpsWorks Stacks with +// a specified stack. // // We do not recommend using this action to register instances. The complete -// registration operation has two primary steps, installing the AWS OpsWorks -// agent on the instance and registering the instance with the stack. 
RegisterInstance +// registration operation includes two tasks: installing the AWS OpsWorks Stacks +// agent on the instance, and registering the instance with the stack. RegisterInstance // handles only the second step. You should instead use the AWS CLI register // command, which performs the entire registration operation. For more information, -// see Registering an Instance with an AWS OpsWorks Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register.html). +// see Registering an Instance with an AWS OpsWorks Stacks Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register.html). +// +// Registered instances have the same requirements as instances that are created +// by using the CreateInstance API. For example, registered instances must be +// running a supported Linux-based operating system, and they must have a supported +// instance type. For more information about requirements for instances that +// you want to register, see Preparing the Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register-registering-preparer.html). // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants @@ -5435,7 +5447,7 @@ func (c *OpsWorks) UnassignInstanceRequest(input *UnassignInstanceInput) (req *r // Unassigns a registered instance from all of it's layers. The instance remains // in the stack as an unassigned instance and can be assigned to another layer, // as needed. You cannot use this action with instances that were created with -// AWS OpsWorks. +// AWS OpsWorks Stacks. // // Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants @@ -6828,7 +6840,7 @@ func (s AttachElasticLoadBalancerOutput) GoString() string { } // Describes a load-based auto scaling upscaling or downscaling threshold configuration, -// which specifies when AWS OpsWorks starts or stops load-based instances. +// which specifies when AWS OpsWorks Stacks starts or stops load-based instances. // Please also see https://docs.aws.amazon.com/goto/WebAPI/opsworks-2013-02-18/AutoScalingThresholds type AutoScalingThresholds struct { _ struct{} `type:"structure"` @@ -6838,9 +6850,9 @@ type AutoScalingThresholds struct { // be in the same region as the stack. // // To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. - // You can either have AWS OpsWorks update the role for you when you first use - // this feature or you can edit the role manually. For more information, see - // Allowing AWS OpsWorks to Act on Your Behalf (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-servicerole.html). + // You can either have AWS OpsWorks Stacks update the role for you when you + // first use this feature or you can edit the role manually. For more information, + // see Allowing AWS OpsWorks Stacks to Act on Your Behalf (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-servicerole.html). Alarms []*string `type:"list"` // The CPU utilization threshold, as a percent of the available CPU. A value @@ -6848,13 +6860,13 @@ type AutoScalingThresholds struct { CpuThreshold *float64 `type:"double"` // The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks - // should ignore metrics and suppress additional scaling events. 
For example, - // AWS OpsWorks adds new instances following an upscaling event but the instances - // won't start reducing the load until they have been booted and configured. - // There is no point in raising additional scaling events during that operation, - // which typically takes several minutes. IgnoreMetricsTime allows you to direct - // AWS OpsWorks to suppress scaling events long enough to get the new instances - // online. + // Stacks should ignore metrics and suppress additional scaling events. For + // example, AWS OpsWorks Stacks adds new instances following an upscaling event + // but the instances won't start reducing the load until they have been booted + // and configured. There is no point in raising additional scaling events during + // that operation, which typically takes several minutes. IgnoreMetricsTime + // allows you to direct AWS OpsWorks Stacks to suppress scaling events long + // enough to get the new instances online. IgnoreMetricsTime *int64 `min:"1" type:"integer"` // The number of instances to add or remove when the load exceeds a threshold. @@ -6950,7 +6962,7 @@ type BlockDeviceMapping struct { // The device name that is exposed to the instance, such as /dev/sdh. For the // root device, you can use the explicit device name or you can set this parameter - // to ROOT_DEVICE and AWS OpsWorks will provide the correct device name. + // to ROOT_DEVICE and AWS OpsWorks Stacks will provide the correct device name. DeviceName *string `type:"string"` // An EBSBlockDevice that defines how to configure an Amazon EBS volume when @@ -7036,20 +7048,21 @@ func (s *ChefConfiguration) SetManageBerkshelf(v bool) *ChefConfiguration { type CloneStackInput struct { _ struct{} `type:"structure"` - // The default AWS OpsWorks agent version. You have the following options: + // The default AWS OpsWorks Stacks agent version. You have the following options: // - // * Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // * Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically // installs new agent versions on the stack's instances as soon as they are // available. // // * Fixed version - Set this parameter to your preferred agent version. // To update the agent version, you must edit the stack configuration and - // specify a new version. AWS OpsWorks then automatically installs that version - // on the stack's instances. + // specify a new version. AWS OpsWorks Stacks then automatically installs + // that version on the stack's instances. // // The default setting is LATEST. To specify an agent version, you must use // the complete version number, not the abbreviated number shown on the console. // For a list of available agent version numbers, call DescribeAgentVersions. + // AgentVersion cannot be set to Chef 12.2. // // You can also specify an agent version when you create or update an instance, // which overrides the stack's default setting. @@ -7105,12 +7118,13 @@ type CloneStackInput struct { // The stack's operating system, which must be set to one of the following. // // * A supported Linux operating system: An Amazon Linux version, such as - // Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon + // Linux 2015.03. // // * A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu // 14.04 LTS, or Ubuntu 12.04 LTS. 
// - // * CentOS 7 + // * CentOS Linux 7 // // * Red Hat Enterprise Linux 7 // @@ -7123,7 +7137,8 @@ type CloneStackInput struct { // OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // // The default option is the parent stack's operating system. For more information - // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // on the supported operating systems, see AWS OpsWorks Stacks Operating Systems + // (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // // You can specify a different Linux operating system for the cloned stack, // but you cannot change from Linux to Windows or Windows to Linux. @@ -7190,11 +7205,12 @@ type CloneStackInput struct { Region *string `type:"string"` // The stack AWS Identity and Access Management (IAM) role, which allows AWS - // OpsWorks to work with AWS resources on your behalf. You must set this parameter - // to the Amazon Resource Name (ARN) for an existing IAM role. If you create - // a stack by using the AWS OpsWorks console, it creates the role for you. You - // can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions. - // For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // OpsWorks Stacks to work with AWS resources on your behalf. You must set this + // parameter to the Amazon Resource Name (ARN) for an existing IAM role. If + // you create a stack by using the AWS OpsWorks Stacks console, it creates the + // role for you. You can obtain an existing stack's IAM ARN programmatically + // by calling DescribePermissions. For more information about IAM ARNs, see + // Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). // // You must set this parameter to a valid service role ARN or the action will // fail; there is no default value. You can specify the source stack's service @@ -7211,25 +7227,25 @@ type CloneStackInput struct { // Whether to use custom cookbooks. UseCustomCookbooks *bool `type:"boolean"` - // Whether to associate the AWS OpsWorks built-in security groups with the stack's - // layers. + // Whether to associate the AWS OpsWorks Stacks built-in security groups with + // the stack's layers. // - // AWS OpsWorks provides a standard set of built-in security groups, one for - // each layer, which are associated with layers by default. With UseOpsworksSecurityGroups + // AWS OpsWorks Stacks provides a standard set of built-in security groups, + // one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups // you can instead provide your own custom security groups. UseOpsworksSecurityGroups // has the following settings: // - // * True - AWS OpsWorks automatically associates the appropriate built-in - // security group with each layer (default setting). You can associate additional - // security groups with a layer after you create it but you cannot delete - // the built-in security group. + // * True - AWS OpsWorks Stacks automatically associates the appropriate + // built-in security group with each layer (default setting). You can associate + // additional security groups with a layer after you create it but you cannot + // delete the built-in security group. 
// - // * False - AWS OpsWorks does not associate built-in security groups with - // layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon - // EC2) security groups and associate a security group with each layer that - // you create. However, you can still manually associate a built-in security - // group with a layer on creation; custom security groups are required only - // for those layers that need custom settings. + // * False - AWS OpsWorks Stacks does not associate built-in security groups + // with layers. You must create appropriate Amazon Elastic Compute Cloud + // (Amazon EC2) security groups and associate a security group with each + // layer that you create. However, you can still manually associate a built-in + // security group with a layer on creation; custom security groups are required + // only for those layers that need custom settings. // // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). UseOpsworksSecurityGroups *bool `type:"boolean"` @@ -7245,9 +7261,10 @@ type CloneStackInput struct { // // If the VPC ID corresponds to a default VPC and you have specified either // the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks - // infers the value of the other parameter. If you specify neither parameter, - // AWS OpsWorks sets these parameters to the first valid Availability Zone for - // the specified region and the corresponding default VPC subnet ID, respectively. + // Stacks infers the value of the other parameter. If you specify neither parameter, + // AWS OpsWorks Stacks sets these parameters to the first valid Availability + // Zone for the specified region and the corresponding default VPC subnet ID, + // respectively. // // If you specify a nondefault VPC ID, note the following: // @@ -7255,8 +7272,8 @@ type CloneStackInput struct { // // * You must specify a value for DefaultSubnetId. // - // For more information on how to use AWS OpsWorks with a VPC, see Running a - // Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // For more information on how to use AWS OpsWorks Stacks with a VPC, see Running + // a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). // For more information on default VPC and EC2 Classic, see Supported Platforms // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). VpcId *string `type:"string"` @@ -7445,6 +7462,185 @@ func (s *CloneStackOutput) SetStackId(v string) *CloneStackOutput { return s } +// Describes the Amazon CloudWatch logs configuration for a layer. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/opsworks-2013-02-18/CloudWatchLogsConfiguration +type CloudWatchLogsConfiguration struct { + _ struct{} `type:"structure"` + + // Whether CloudWatch Logs is enabled for a layer. + Enabled *bool `type:"boolean"` + + // A list of configuration options for CloudWatch Logs. + LogStreams []*CloudWatchLogsLogStream `type:"list"` +} + +// String returns the string representation +func (s CloudWatchLogsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudWatchLogsConfiguration) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. 
+func (s *CloudWatchLogsConfiguration) SetEnabled(v bool) *CloudWatchLogsConfiguration { + s.Enabled = &v + return s +} + +// SetLogStreams sets the LogStreams field's value. +func (s *CloudWatchLogsConfiguration) SetLogStreams(v []*CloudWatchLogsLogStream) *CloudWatchLogsConfiguration { + s.LogStreams = v + return s +} + +// Describes the Amazon CloudWatch logs configuration for a layer. For detailed +// information about members of this data type, see the CloudWatch Logs Agent +// Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). +// Please also see https://docs.aws.amazon.com/goto/WebAPI/opsworks-2013-02-18/CloudWatchLogsLogStream +type CloudWatchLogsLogStream struct { + _ struct{} `type:"structure"` + + // Specifies the max number of log events in a batch, up to 10000. The default + // value is 1000. + BatchCount *int64 `type:"integer"` + + // Specifies the maximum size of log events in a batch, in bytes, up to 1048576 + // bytes. The default value is 32768 bytes. This size is calculated as the sum + // of all event messages in UTF-8, plus 26 bytes for each log event. + BatchSize *int64 `type:"integer"` + + // Specifies the time duration for the batching of log events. The minimum value + // is 5000ms and default value is 5000ms. + BufferDuration *int64 `type:"integer"` + + // Specifies how the time stamp is extracted from logs. For more information, + // see the CloudWatch Logs Agent Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html). + DatetimeFormat *string `type:"string"` + + // Specifies the encoding of the log file so that the file can be read correctly. + // The default is utf_8. Encodings supported by Python codecs.decode() can be + // used here. + Encoding *string `type:"string" enum:"CloudWatchLogsEncoding"` + + // Specifies log files that you want to push to CloudWatch Logs. + // + // File can point to a specific file or multiple files (by using wild card characters + // such as /var/log/system.log*). Only the latest file is pushed to CloudWatch + // Logs, based on file modification time. We recommend that you use wild card + // characters to specify a series of files of the same type, such as access_log.2014-06-01-01, + // access_log.2014-06-01-02, and so on by using a pattern like access_log.*. + // Don't use a wildcard to match multiple file types, such as access_log_80 + // and access_log_443. To specify multiple, different file types, add another + // log stream entry to the configuration file, so that each log file type is + // stored in a different log group. + // + // Zipped files are not supported. + File *string `type:"string"` + + // Specifies the range of lines for identifying a file. The valid values are + // one number, or two dash-delimited numbers, such as '1', '2-5'. The default + // value is '1', meaning the first line is used to calculate the fingerprint. + // Fingerprint lines are not sent to CloudWatch Logs unless all specified lines + // are available. + FileFingerprintLines *string `type:"string"` + + // Specifies where to start to read data (start_of_file or end_of_file). The + // default is start_of_file. This setting is only used if there is no state + // persisted for that log stream. + InitialPosition *string `type:"string" enum:"CloudWatchLogsInitialPosition"` + + // Specifies the destination log group. A log group is created automatically + // if it doesn't already exist. Log group names can be between 1 and 512 characters + // long. 
Allowed characters include a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), + // '/' (forward slash), and '.' (period). + LogGroupName *string `type:"string"` + + // Specifies the pattern for identifying the start of a log message. + MultiLineStartPattern *string `type:"string"` + + // Specifies the time zone of log event time stamps. + TimeZone *string `type:"string" enum:"CloudWatchLogsTimeZone"` +} + +// String returns the string representation +func (s CloudWatchLogsLogStream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudWatchLogsLogStream) GoString() string { + return s.String() +} + +// SetBatchCount sets the BatchCount field's value. +func (s *CloudWatchLogsLogStream) SetBatchCount(v int64) *CloudWatchLogsLogStream { + s.BatchCount = &v + return s +} + +// SetBatchSize sets the BatchSize field's value. +func (s *CloudWatchLogsLogStream) SetBatchSize(v int64) *CloudWatchLogsLogStream { + s.BatchSize = &v + return s +} + +// SetBufferDuration sets the BufferDuration field's value. +func (s *CloudWatchLogsLogStream) SetBufferDuration(v int64) *CloudWatchLogsLogStream { + s.BufferDuration = &v + return s +} + +// SetDatetimeFormat sets the DatetimeFormat field's value. +func (s *CloudWatchLogsLogStream) SetDatetimeFormat(v string) *CloudWatchLogsLogStream { + s.DatetimeFormat = &v + return s +} + +// SetEncoding sets the Encoding field's value. +func (s *CloudWatchLogsLogStream) SetEncoding(v string) *CloudWatchLogsLogStream { + s.Encoding = &v + return s +} + +// SetFile sets the File field's value. +func (s *CloudWatchLogsLogStream) SetFile(v string) *CloudWatchLogsLogStream { + s.File = &v + return s +} + +// SetFileFingerprintLines sets the FileFingerprintLines field's value. +func (s *CloudWatchLogsLogStream) SetFileFingerprintLines(v string) *CloudWatchLogsLogStream { + s.FileFingerprintLines = &v + return s +} + +// SetInitialPosition sets the InitialPosition field's value. +func (s *CloudWatchLogsLogStream) SetInitialPosition(v string) *CloudWatchLogsLogStream { + s.InitialPosition = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *CloudWatchLogsLogStream) SetLogGroupName(v string) *CloudWatchLogsLogStream { + s.LogGroupName = &v + return s +} + +// SetMultiLineStartPattern sets the MultiLineStartPattern field's value. +func (s *CloudWatchLogsLogStream) SetMultiLineStartPattern(v string) *CloudWatchLogsLogStream { + s.MultiLineStartPattern = &v + return s +} + +// SetTimeZone sets the TimeZone field's value. +func (s *CloudWatchLogsLogStream) SetTimeZone(v string) *CloudWatchLogsLogStream { + s.TimeZone = &v + return s +} + // Describes a command. // Please also see https://docs.aws.amazon.com/goto/WebAPI/opsworks-2013-02-18/Command type Command struct { @@ -7636,9 +7832,9 @@ type CreateAppInput struct { // The app type. Each supported type is associated with a particular layer. // For example, PHP applications are associated with a PHP layer. AWS OpsWorks - // deploys an application to those instances that are members of the corresponding - // layer. If your app isn't one of the standard types, or you prefer to implement - // your own Deploy recipes, specify other. + // Stacks deploys an application to those instances that are members of the + // corresponding layer. If your app isn't one of the standard types, or you + // prefer to implement your own Deploy recipes, specify other. 
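As the CreateApp documentation above notes, the app type selects the layer whose instances receive the deployment. A hypothetical sketch of creating a PHP app; the stack ID and app name are placeholders, and the session is assumed.

```
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.Must(session.NewSession()))

	// Stack ID and app name are placeholders. The "php" type ties the app to
	// the PHP layer, so OpsWorks Stacks deploys it to that layer's instances.
	out, err := svc.CreateApp(&opsworks.CreateAppInput{
		StackId: aws.String("2f18-stack-id-placeholder"),
		Name:    aws.String("example-app"),
		Type:    aws.String("php"),
	})
	if err != nil {
		log.Fatalf("CreateApp: %v", err)
	}
	log.Printf("created app %s", aws.StringValue(out.AppId))
}
```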
 	//
	// Type is a required field
	Type *string `type:"string" required:"true" enum:"AppType"`
@@ -7927,18 +8123,19 @@ func (s *CreateDeploymentOutput) SetDeploymentId(v string) *CreateDeploymentOutp
 type CreateInstanceInput struct {
 	_ struct{} `type:"structure"`
 
-	// The default AWS OpsWorks agent version. You have the following options:
+	// The default AWS OpsWorks Stacks agent version. You have the following options:
 	//
 	//    * INHERIT - Use the stack's default agent version setting.
 	//
 	//    * version_number - Use the specified agent version. This value overrides
 	//    the stack's default setting. To update the agent version, edit the instance
-	//    configuration and specify a new version. AWS OpsWorks then automatically
+	//    configuration and specify a new version. AWS OpsWorks Stacks then automatically
 	//    installs that version on the instance.
 	//
 	// The default setting is INHERIT. To specify an agent version, you must use
 	// the complete version number, not the abbreviated number shown on the console.
 	// For a list of available agent version numbers, call DescribeAgentVersions.
+	// AgentVersion cannot be set to Chef 12.2.
 	AgentVersion *string `type:"string"`
 
 	// A custom AMI ID to be used to create the instance. The AMI should be based
@@ -8001,12 +8198,13 @@ type CreateInstanceInput struct {
 	// The instance's operating system, which must be set to one of the following.
 	//
 	//    * A supported Linux operating system: An Amazon Linux version, such as
-	//    Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.
+	//    Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon
+	//    Linux 2015.03.
 	//
 	//    * A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu
 	//    14.04 LTS, or Ubuntu 12.04 LTS.
 	//
-	//    * CentOS 7
+	//    * CentOS Linux 7
 	//
 	//    * Red Hat Enterprise Linux 7
 	//
@@ -8018,15 +8216,15 @@ type CreateInstanceInput struct {
 	//    * A custom AMI: Custom.
 	//
 	// For more information on the supported operating systems, see AWS OpsWorks
-	// Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html).
+	// Stacks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html).
 	//
 	// The default option is the current Amazon Linux version. If you set this parameter
 	// to Custom, you must use the CreateInstance action's AmiId parameter to specify
 	// the custom AMI that you want to use. Block device mappings are not supported
 	// if the value is Custom. For more information on the supported operating systems,
 	// see Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). For
-	// more information on how to use custom AMIs with AWS OpsWorks, see Using Custom
-	// AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html).
+	// more information on how to use custom AMIs with AWS OpsWorks Stacks, see
+	// Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html).
 	Os *string `type:"string"`
 
 	// The instance root device type. For more information, see Storage for the
@@ -8043,7 +8241,7 @@ type CreateInstanceInput struct {
 	// The ID of the instance's subnet. If the stack is running in a VPC, you can
 	// use this parameter to override the stack's default subnet ID value and direct
-	// AWS OpsWorks to launch the instance in a different subnet.
+	// AWS OpsWorks Stacks to launch the instance in a different subnet.
 	SubnetId *string `type:"string"`
 
 	// The instance's tenancy option.
The default option is no tenancy, or if the @@ -8243,6 +8441,10 @@ type CreateLayerInput struct { // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). AutoAssignPublicIps *bool `type:"boolean"` + // Specifies CloudWatch Logs configuration options for the layer. For more information, + // see CloudWatchLogsLogStream. + CloudWatchLogsConfiguration *CloudWatchLogsConfiguration `type:"structure"` + // The ARN of an IAM profile to be used for the layer's EC2 instances. For more // information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). CustomInstanceProfileArn *string `type:"string"` @@ -8286,13 +8488,13 @@ type CreateLayerInput struct { Packages []*string `type:"list"` // For custom layers only, use this parameter to specify the layer's short name, - // which is used internally by AWS OpsWorks and by Chef recipes. The short name - // is also used as the name for the directory where your app files are installed. - // It can have a maximum of 200 characters, which are limited to the alphanumeric - // characters, '-', '_', and '.'. + // which is used internally by AWS OpsWorks Stacks and by Chef recipes. The + // short name is also used as the name for the directory where your app files + // are installed. It can have a maximum of 200 characters, which are limited + // to the alphanumeric characters, '-', '_', and '.'. // - // The built-in layers' short names are defined by AWS OpsWorks. For more information, - // see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html). + // The built-in layers' short names are defined by AWS OpsWorks Stacks. For + // more information, see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html). // // Shortname is a required field Shortname *string `type:"string" required:"true"` @@ -8376,6 +8578,12 @@ func (s *CreateLayerInput) SetAutoAssignPublicIps(v bool) *CreateLayerInput { return s } +// SetCloudWatchLogsConfiguration sets the CloudWatchLogsConfiguration field's value. +func (s *CreateLayerInput) SetCloudWatchLogsConfiguration(v *CloudWatchLogsConfiguration) *CreateLayerInput { + s.CloudWatchLogsConfiguration = v + return s +} + // SetCustomInstanceProfileArn sets the CustomInstanceProfileArn field's value. func (s *CreateLayerInput) SetCustomInstanceProfileArn(v string) *CreateLayerInput { s.CustomInstanceProfileArn = &v @@ -8489,21 +8697,21 @@ func (s *CreateLayerOutput) SetLayerId(v string) *CreateLayerOutput { type CreateStackInput struct { _ struct{} `type:"structure"` - // The default AWS OpsWorks agent version. You have the following options: + // The default AWS OpsWorks Stacks agent version. You have the following options: // - // * Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // * Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically // installs new agent versions on the stack's instances as soon as they are // available. // // * Fixed version - Set this parameter to your preferred agent version. // To update the agent version, you must edit the stack configuration and - // specify a new version. AWS OpsWorks then automatically installs that version - // on the stack's instances. + // specify a new version. AWS OpsWorks Stacks then automatically installs + // that version on the stack's instances. // // The default setting is the most recent release of the agent. 
To specify an // agent version, you must use the complete version number, not the abbreviated // number shown on the console. For a list of available agent version numbers, - // call DescribeAgentVersions. + // call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. // // You can also specify an agent version when you create or update an instance, // which overrides the stack's default setting. @@ -8556,12 +8764,13 @@ type CreateStackInput struct { // You can specify one of the following. // // * A supported Linux operating system: An Amazon Linux version, such as - // Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon + // Linux 2015.03. // // * A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu // 14.04 LTS, or Ubuntu 12.04 LTS. // - // * CentOS 7 + // * CentOS Linux 7 // // * Red Hat Enterprise Linux 7 // @@ -8574,7 +8783,8 @@ type CreateStackInput struct { // you create instances. For more information, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // // The default option is the current Amazon Linux version. For more information - // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // on the supported operating systems, see AWS OpsWorks Stacks Operating Systems + // (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). DefaultOs *string `type:"string"` // The default root device type. This value is the default for all instances @@ -8643,9 +8853,9 @@ type CreateStackInput struct { Region *string `type:"string" required:"true"` // The stack's AWS Identity and Access Management (IAM) role, which allows AWS - // OpsWorks to work with AWS resources on your behalf. You must set this parameter - // to the Amazon Resource Name (ARN) for an existing IAM role. For more information - // about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // OpsWorks Stacks to work with AWS resources on your behalf. You must set this + // parameter to the Amazon Resource Name (ARN) for an existing IAM role. For + // more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). // // ServiceRoleArn is a required field ServiceRoleArn *string `type:"string" required:"true"` @@ -8653,21 +8863,21 @@ type CreateStackInput struct { // Whether the stack uses custom cookbooks. UseCustomCookbooks *bool `type:"boolean"` - // Whether to associate the AWS OpsWorks built-in security groups with the stack's - // layers. + // Whether to associate the AWS OpsWorks Stacks built-in security groups with + // the stack's layers. // - // AWS OpsWorks provides a standard set of built-in security groups, one for - // each layer, which are associated with layers by default. With UseOpsworksSecurityGroups + // AWS OpsWorks Stacks provides a standard set of built-in security groups, + // one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups // you can instead provide your own custom security groups. UseOpsworksSecurityGroups // has the following settings: // - // * True - AWS OpsWorks automatically associates the appropriate built-in - // security group with each layer (default setting). 
You can associate additional - // security groups with a layer after you create it, but you cannot delete - // the built-in security group. + // * True - AWS OpsWorks Stacks automatically associates the appropriate + // built-in security group with each layer (default setting). You can associate + // additional security groups with a layer after you create it, but you cannot + // delete the built-in security group. // - // * False - AWS OpsWorks does not associate built-in security groups with - // layers. You must create appropriate EC2 security groups and associate + // * False - AWS OpsWorks Stacks does not associate built-in security groups + // with layers. You must create appropriate EC2 security groups and associate // a security group with each layer that you create. However, you can still // manually associate a built-in security group with a layer on creation; // custom security groups are required only for those layers that need custom @@ -8687,9 +8897,10 @@ type CreateStackInput struct { // // If the VPC ID corresponds to a default VPC and you have specified either // the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks - // infers the value of the other parameter. If you specify neither parameter, - // AWS OpsWorks sets these parameters to the first valid Availability Zone for - // the specified region and the corresponding default VPC subnet ID, respectively. + // Stacks infers the value of the other parameter. If you specify neither parameter, + // AWS OpsWorks Stacks sets these parameters to the first valid Availability + // Zone for the specified region and the corresponding default VPC subnet ID, + // respectively. // // If you specify a nondefault VPC ID, note the following: // @@ -8697,8 +8908,8 @@ type CreateStackInput struct { // // * You must specify a value for DefaultSubnetId. // - // For more information on how to use AWS OpsWorks with a VPC, see Running a - // Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // For more information on how to use AWS OpsWorks Stacks with a VPC, see Running + // a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). // For more information on default VPC and EC2-Classic, see Supported Platforms // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). VpcId *string `type:"string"` @@ -8894,9 +9105,9 @@ type CreateUserProfileInput struct { // The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], // '-', and '_'. If the specified name includes other punctuation marks, AWS - // OpsWorks removes them. For example, my.name will be changed to myname. If - // you do not specify an SSH user name, AWS OpsWorks generates one from the - // IAM user name. + // OpsWorks Stacks removes them. For example, my.name will be changed to myname. + // If you do not specify an SSH user name, AWS OpsWorks Stacks generates one + // from the IAM user name. SshUsername *string `type:"string"` } @@ -9458,9 +9669,9 @@ type DeploymentCommand struct { // whose OS you want to upgrade, such as Amazon Linux 2014.09. You must also // set the allow_reboot argument to true. // - // * allow_reboot - Specifies whether to allow AWS OpsWorks to reboot the - // instances if necessary, after installing the updates. This argument can - // be set to either true or false. The default value is false. 
+	//    * allow_reboot - Specifies whether to allow AWS OpsWorks Stacks to reboot
+	//    the instances if necessary, after installing the updates. This argument
+	//    can be set to either true or false. The default value is false.
 	//
 	// For example, to upgrade an instance to Amazon Linux 2014.09, set Args to
 	// the following.
@@ -9493,9 +9704,9 @@
 	// The default setting is {"migrate":["false"]}.
 	//
 	//    * rollback Roll the app back to the previous version. When you update
-	//    an app, AWS OpsWorks stores the previous version, up to a maximum of five
-	//    versions. You can use this command to roll an app back as many as four
-	//    versions.
+	//    an app, AWS OpsWorks Stacks stores the previous version, up to a maximum
+	//    of five versions. You can use this command to roll an app back as many
+	//    as four versions.
 	//
 	//    * start: Start the app's web or application server.
 	//
@@ -9764,9 +9975,9 @@ func (s DeregisterRdsDbInstanceOutput) GoString() string {
 type DeregisterVolumeInput struct {
 	_ struct{} `type:"structure"`
 
-	// The AWS OpsWorks volume ID, which is the GUID that AWS OpsWorks assigned
-	// to the instance when you registered the volume with the stack, not the Amazon
-	// EC2 volume ID.
+	// The AWS OpsWorks Stacks volume ID, which is the GUID that AWS OpsWorks Stacks
+	// assigned to the instance when you registered the volume with the stack, not
+	// the Amazon EC2 volume ID.
 	//
 	// VolumeId is a required field
 	VolumeId *string `type:"string" required:"true"`
@@ -10871,7 +11082,7 @@ func (s *DescribeStackProvisioningParametersInput) SetStackId(v string) *Describ
 type DescribeStackProvisioningParametersOutput struct {
 	_ struct{} `type:"structure"`
 
-	// The AWS OpsWorks agent installer's URL.
+	// The AWS OpsWorks Stacks agent installer's URL.
 	AgentInstallerUrl *string `type:"string"`
 
 	// An embedded object that contains the provisioning parameters.
@@ -11755,7 +11966,7 @@ func (s *GetHostnameSuggestionOutput) SetLayerId(v string) *GetHostnameSuggestio
 type GrantAccessInput struct {
 	_ struct{} `type:"structure"`
 
-	// The instance's AWS OpsWorks ID.
+	// The instance's AWS OpsWorks Stacks ID.
 	//
 	// InstanceId is a required field
 	InstanceId *string `type:"string" required:"true"`
@@ -11929,7 +12140,7 @@ type Instance struct {
 	// For registered instances, who performed the registration.
 	RegisteredBy *string `type:"string"`
 
-	// The instance's reported AWS OpsWorks agent version.
+	// The instance's reported AWS OpsWorks Stacks agent version.
 	ReportedAgentVersion *string `type:"string"`
 
 	// For registered instances, the reported operating system.
@@ -12480,10 +12691,10 @@ type Layer struct {
 	// The layer attributes.
 	//
 	// For the HaproxyStatsPassword, MysqlRootPassword, and GangliaPassword attributes,
-	// AWS OpsWorks returns *****FILTERED***** instead of the actual value
+	// AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value.
 	//
-	// For an ECS Cluster layer, AWS OpsWorks the EcsClusterArn attribute is set
-	// to the cluster's ARN.
+	// For an ECS Cluster layer, AWS OpsWorks Stacks sets the EcsClusterArn attribute
+	// to the cluster's ARN.
 	Attributes map[string]*string `type:"map"`
 
 	// Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html)
 	// to the layer's instances. For more information, see How to Edit
 	// a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html).
AutoAssignPublicIps *bool `type:"boolean"` + // The Amazon CloudWatch Logs configuration settings for the layer. + CloudWatchLogsConfiguration *CloudWatchLogsConfiguration `type:"structure"` + // Date when the layer was created. CreatedAt *string `type:"string"` @@ -12512,12 +12726,13 @@ type Layer struct { // An array containing the layer's custom security group IDs. CustomSecurityGroupIds []*string `type:"list"` - // AWS OpsWorks supports five lifecycle events: setup, configuration, deploy, - // undeploy, and shutdown. For each layer, AWS OpsWorks runs a set of standard - // recipes for each event. In addition, you can provide custom recipes for any - // or all layers and events. AWS OpsWorks runs custom event recipes after the - // standard recipes. LayerCustomRecipes specifies the custom recipes for a particular - // layer to be run in response to each of the five events. + // AWS OpsWorks Stacks supports five lifecycle events: setup, configuration, + // deploy, undeploy, and shutdown. For each layer, AWS OpsWorks Stacks runs + // a set of standard recipes for each event. In addition, you can provide custom + // recipes for any or all layers and events. AWS OpsWorks Stacks runs custom + // event recipes after the standard recipes. LayerCustomRecipes specifies the + // custom recipes for a particular layer to be run in response to each of the + // five events. // // To specify a recipe, use the cookbook's directory name in the repository // followed by two colons and the recipe name, which is the recipe's file name @@ -12597,6 +12812,12 @@ func (s *Layer) SetAutoAssignPublicIps(v bool) *Layer { return s } +// SetCloudWatchLogsConfiguration sets the CloudWatchLogsConfiguration field's value. +func (s *Layer) SetCloudWatchLogsConfiguration(v *CloudWatchLogsConfiguration) *Layer { + s.CloudWatchLogsConfiguration = v + return s +} + // SetCreatedAt sets the CreatedAt field's value. func (s *Layer) SetCreatedAt(v string) *Layer { s.CreatedAt = &v @@ -12736,7 +12957,7 @@ type LoadBasedAutoScalingConfiguration struct { _ struct{} `type:"structure"` // An AutoScalingThresholds object that describes the downscaling configuration, - // which defines how and when AWS OpsWorks reduces the number of instances. + // which defines how and when AWS OpsWorks Stacks reduces the number of instances. DownScaling *AutoScalingThresholds `type:"structure"` // Whether load-based auto scaling is enabled for the layer. @@ -12746,7 +12967,7 @@ type LoadBasedAutoScalingConfiguration struct { LayerId *string `type:"string"` // An AutoScalingThresholds object that describes the upscaling configuration, - // which defines how and when AWS OpsWorks increases the number of instances. + // which defines how and when AWS OpsWorks Stacks increases the number of instances. UpScaling *AutoScalingThresholds `type:"structure"` } @@ -13004,7 +13225,7 @@ type RdsDbInstance struct { // The DB instance identifier. DbInstanceIdentifier *string `type:"string"` - // AWS OpsWorks returns *****FILTERED***** instead of the actual value. + // AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value. DbPassword *string `type:"string"` // The master user name. @@ -13013,9 +13234,10 @@ type RdsDbInstance struct { // The instance's database engine. Engine *string `type:"string"` - // Set to true if AWS OpsWorks was unable to discover the Amazon RDS instance. - // AWS OpsWorks attempts to discover the instance only once. If this value is - // set to true, you must deregister the instance and then register it again. 
+ // Set to true if AWS OpsWorks Stacks is unable to discover the Amazon RDS instance. + // AWS OpsWorks Stacks attempts to discover the instance only once. If this + // value is set to true, you must deregister the instance, and then register + // it again. MissingOnRds *bool `type:"boolean"` // The instance's ARN. @@ -13024,7 +13246,7 @@ type RdsDbInstance struct { // The instance's AWS region. Region *string `type:"string"` - // The ID of the stack that the instance is registered with. + // The ID of the stack with which the instance is registered. StackId *string `type:"string"` } @@ -13146,12 +13368,13 @@ func (s RebootInstanceOutput) GoString() string { return s.String() } -// AWS OpsWorks supports five lifecycle events: setup, configuration, deploy, -// undeploy, and shutdown. For each layer, AWS OpsWorks runs a set of standard -// recipes for each event. In addition, you can provide custom recipes for any -// or all layers and events. AWS OpsWorks runs custom event recipes after the -// standard recipes. LayerCustomRecipes specifies the custom recipes for a particular -// layer to be run in response to each of the five events. +// AWS OpsWorks Stacks supports five lifecycle events: setup, configuration, +// deploy, undeploy, and shutdown. For each layer, AWS OpsWorks Stacks runs +// a set of standard recipes for each event. In addition, you can provide custom +// recipes for any or all layers and events. AWS OpsWorks Stacks runs custom +// event recipes after the standard recipes. LayerCustomRecipes specifies the +// custom recipes for a particular layer to be run in response to each of the +// five events. // // To specify a recipe, use the cookbook's directory name in the repository // followed by two colons and the recipe name, which is the recipe's file name @@ -13472,7 +13695,7 @@ func (s *RegisterInstanceInput) SetStackId(v string) *RegisterInstanceInput { type RegisterInstanceOutput struct { _ struct{} `type:"structure"` - // The registered instance's AWS OpsWorks ID. + // The registered instance's AWS OpsWorks Stacks ID. InstanceId *string `type:"string"` } @@ -13756,7 +13979,7 @@ func (s *SelfUserProfile) SetSshUsername(v string) *SelfUserProfile { return s } -// Describes an AWS OpsWorks service error. +// Describes an AWS OpsWorks Stacks service error. // Please also see https://docs.aws.amazon.com/goto/WebAPI/opsworks-2013-02-18/ServiceError type ServiceError struct { _ struct{} `type:"structure"` @@ -13832,7 +14055,7 @@ type SetLoadBasedAutoScalingInput struct { // An AutoScalingThresholds object with the downscaling threshold configuration. // If the load falls below these thresholds for a specified amount of time, - // AWS OpsWorks stops a specified number of instances. + // AWS OpsWorks Stacks stops a specified number of instances. DownScaling *AutoScalingThresholds `type:"structure"` // Enables load-based auto scaling for the layer. @@ -13845,7 +14068,7 @@ type SetLoadBasedAutoScalingInput struct { // An AutoScalingThresholds object with the upscaling threshold configuration. // If the load exceeds these thresholds for a specified amount of time, AWS - // OpsWorks starts a specified number of instances. + // OpsWorks Stacks starts a specified number of instances. 
UpScaling *AutoScalingThresholds `type:"structure"` } @@ -14102,8 +14325,8 @@ type ShutdownEventConfiguration struct { // see Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#conn-drain) DelayUntilElbConnectionsDrained *bool `type:"boolean"` - // The time, in seconds, that AWS OpsWorks will wait after triggering a Shutdown - // event before shutting down an instance. + // The time, in seconds, that AWS OpsWorks Stacks will wait after triggering + // a Shutdown event before shutting down an instance. ExecutionTimeout *int64 `type:"integer"` } @@ -14146,20 +14369,20 @@ type Source struct { // For more information on how to safely handle IAM credentials, see http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html // (http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). // - // In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual - // value. + // In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the + // actual value. Password *string `type:"string"` - // The application's version. AWS OpsWorks enables you to easily deploy new - // versions of an application. One of the simplest approaches is to have branches - // or revisions in your repository that represent different versions that can - // potentially be deployed. + // The application's version. AWS OpsWorks Stacks enables you to easily deploy + // new versions of an application. One of the simplest approaches is to have + // branches or revisions in your repository that represent different versions + // that can potentially be deployed. Revision *string `type:"string"` // In requests, the repository's SSH key. // - // In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual - // value. + // In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the + // actual value. SshKey *string `type:"string"` // The repository type. @@ -14372,8 +14595,8 @@ type Stack struct { // Whether the stack uses custom cookbooks. UseCustomCookbooks *bool `type:"boolean"` - // Whether the stack automatically associates the AWS OpsWorks built-in security - // groups with the stack's layers. + // Whether the stack automatically associates the AWS OpsWorks Stacks built-in + // security groups with the stack's layers. UseOpsworksSecurityGroups *bool `type:"boolean"` // The VPC ID; applicable only if the stack is running in a VPC. @@ -14850,7 +15073,7 @@ func (s StopStackOutput) GoString() string { type TemporaryCredential struct { _ struct{} `type:"structure"` - // The instance's AWS OpsWorks ID. + // The instance's AWS OpsWorks Stacks ID. InstanceId *string `type:"string"` // The password. @@ -15282,18 +15505,20 @@ func (s UpdateElasticIpOutput) GoString() string { type UpdateInstanceInput struct { _ struct{} `type:"structure"` - // The default AWS OpsWorks agent version. You have the following options: + // The default AWS OpsWorks Stacks agent version. You have the following options: // // * INHERIT - Use the stack's default agent version setting. // // * version_number - Use the specified agent version. This value overrides // the stack's default setting. To update the agent version, you must edit - // the instance configuration and specify a new version. AWS OpsWorks then - // automatically installs that version on the instance. + // the instance configuration and specify a new version. 
AWS OpsWorks Stacks + // then automatically installs that version on the instance. // // The default setting is INHERIT. To specify an agent version, you must use // the complete version number, not the abbreviated number shown on the console. // For a list of available agent version numbers, call DescribeAgentVersions. + // + // AgentVersion cannot be set to Chef 12.2. AgentVersion *string `type:"string"` // The ID of the AMI that was used to create the instance. The value of this @@ -15347,12 +15572,13 @@ type UpdateInstanceInput struct { // You cannot update an instance that is using a custom AMI. // // * A supported Linux operating system: An Amazon Linux version, such as - // Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon + // Linux 2015.03. // // * A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu // 14.04 LTS, or Ubuntu 12.04 LTS. // - // * CentOS 7 + // * CentOS Linux 7 // // * Red Hat Enterprise Linux 7 // @@ -15362,7 +15588,7 @@ type UpdateInstanceInput struct { // Windows Server 2012 R2 with SQL Server Web. // // For more information on the supported operating systems, see AWS OpsWorks - // Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // Stacks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // // The default option is the current Amazon Linux version. If you set this parameter // to Custom, you must use the AmiId parameter to specify the custom AMI that @@ -15505,6 +15731,10 @@ type UpdateLayerInput struct { // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). AutoAssignPublicIps *bool `type:"boolean"` + // Specifies CloudWatch Logs configuration options for the layer. For more information, + // see CloudWatchLogsLogStream. + CloudWatchLogsConfiguration *CloudWatchLogsConfiguration `type:"structure"` + // The ARN of an IAM profile to be used for all of the layer's EC2 instances. // For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). CustomInstanceProfileArn *string `type:"string"` @@ -15547,13 +15777,13 @@ type UpdateLayerInput struct { Packages []*string `type:"list"` // For custom layers only, use this parameter to specify the layer's short name, - // which is used internally by AWS OpsWorksand by Chef. The short name is also - // used as the name for the directory where your app files are installed. It - // can have a maximum of 200 characters and must be in the following format: + // which is used internally by AWS OpsWorks Stacks and by Chef. The short name + // is also used as the name for the directory where your app files are installed. + // It can have a maximum of 200 characters and must be in the following format: // /\A[a-z0-9\-\_\.]+\Z/. // - // The built-in layers' short names are defined by AWS OpsWorks. For more information, - // see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html) + // The built-in layers' short names are defined by AWS OpsWorks Stacks. For + // more information, see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html) Shortname *string `type:"string"` // Whether to use Amazon EBS-optimized instances. 
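
The change above adds a CloudWatchLogsConfiguration field to UpdateLayerInput, and the hunk below wires up its setter. As a rough sketch of how a caller might exercise the new surface through the SDK (the region, layer ID, and log group name are placeholder assumptions, not values taken from this patch), the setters this patch introduces can be combined like this:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	// Placeholder region; use whichever endpoint hosts your stack.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := opsworks.New(sess)

	// Describe one log stream: ship /var/log/syslog to a placeholder log
	// group, reading from the start of the file as UTF-8.
	stream := &opsworks.CloudWatchLogsLogStream{}
	stream.SetFile("/var/log/syslog")
	stream.SetLogGroupName("example-layer-syslog")
	stream.SetInitialPosition(opsworks.CloudWatchLogsInitialPositionStartOfFile)
	stream.SetEncoding(opsworks.CloudWatchLogsEncodingUtf8)

	cfg := &opsworks.CloudWatchLogsConfiguration{}
	cfg.SetEnabled(true)
	cfg.SetLogStreams([]*opsworks.CloudWatchLogsLogStream{stream})

	// Attach the configuration to an existing layer (placeholder ID).
	input := &opsworks.UpdateLayerInput{}
	input.SetLayerId("11111111-2222-3333-4444-555555555555")
	input.SetCloudWatchLogsConfiguration(cfg)

	if _, err := client.UpdateLayer(input); err != nil {
		log.Fatalf("UpdateLayer failed: %v", err)
	}
	fmt.Println("CloudWatch Logs streaming enabled for layer")
}
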
@@ -15614,6 +15844,12 @@ func (s *UpdateLayerInput) SetAutoAssignPublicIps(v bool) *UpdateLayerInput { return s } +// SetCloudWatchLogsConfiguration sets the CloudWatchLogsConfiguration field's value. +func (s *UpdateLayerInput) SetCloudWatchLogsConfiguration(v *CloudWatchLogsConfiguration) *UpdateLayerInput { + s.CloudWatchLogsConfiguration = v + return s +} + // SetCustomInstanceProfileArn sets the CustomInstanceProfileArn field's value. func (s *UpdateLayerInput) SetCustomInstanceProfileArn(v string) *UpdateLayerInput { s.CustomInstanceProfileArn = &v @@ -15822,20 +16058,21 @@ func (s UpdateRdsDbInstanceOutput) GoString() string { type UpdateStackInput struct { _ struct{} `type:"structure"` - // The default AWS OpsWorks agent version. You have the following options: + // The default AWS OpsWorks Stacks agent version. You have the following options: // - // * Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // * Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically // installs new agent versions on the stack's instances as soon as they are // available. // // * Fixed version - Set this parameter to your preferred agent version. // To update the agent version, you must edit the stack configuration and - // specify a new version. AWS OpsWorks then automatically installs that version - // on the stack's instances. + // specify a new version. AWS OpsWorks Stacks then automatically installs + // that version on the stack's instances. // // The default setting is LATEST. To specify an agent version, you must use // the complete version number, not the abbreviated number shown on the console. // For a list of available agent version numbers, call DescribeAgentVersions. + // AgentVersion cannot be set to Chef 12.2. // // You can also specify an agent version when you create or update an instance, // which overrides the stack's default setting. @@ -15884,12 +16121,13 @@ type UpdateStackInput struct { // The stack's operating system, which must be set to one of the following: // // * A supported Linux operating system: An Amazon Linux version, such as - // Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon + // Linux 2015.03. // // * A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu // 14.04 LTS, or Ubuntu 12.04 LTS. // - // * CentOS 7 + // * CentOS Linux 7 // // * Red Hat Enterprise Linux 7 // @@ -15903,7 +16141,8 @@ type UpdateStackInput struct { // OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // // The default option is the stack's current operating system. For more information - // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // on the supported operating systems, see AWS OpsWorks Stacks Operating Systems + // (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). DefaultOs *string `type:"string"` // The default root device type. This value is used by default for all instances @@ -15912,8 +16151,8 @@ type UpdateStackInput struct { DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` // A default Amazon EC2 key-pair name. The default value is none. 
If you specify
-	// a key-pair name, AWS OpsWorks installs the public key on the instance and
-	// you can use the private key with an SSH client to log in to the instance.
+	// a key-pair name, AWS OpsWorks Stacks installs the public key on the instance
+	// and you can use the private key with an SSH client to log in to the instance.
 	// For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html)
 	// and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html).
 	// You can override this setting by specifying a different key pair, or no key
@@ -15973,21 +16212,21 @@
 	// Whether the stack uses custom cookbooks.
 	UseCustomCookbooks *bool `type:"boolean"`
 
-	// Whether to associate the AWS OpsWorks built-in security groups with the stack's
-	// layers.
+	// Whether to associate the AWS OpsWorks Stacks built-in security groups with
+	// the stack's layers.
 	//
-	// AWS OpsWorks provides a standard set of built-in security groups, one for
-	// each layer, which are associated with layers by default. UseOpsworksSecurityGroups
+	// AWS OpsWorks Stacks provides a standard set of built-in security groups,
+	// one for each layer, which are associated with layers by default. UseOpsworksSecurityGroups
 	// allows you to provide your own custom security groups instead of using the
 	// built-in groups. UseOpsworksSecurityGroups has the following settings:
 	//
-	//    * True - AWS OpsWorks automatically associates the appropriate built-in
-	//    security group with each layer (default setting). You can associate additional
-	//    security groups with a layer after you create it, but you cannot delete
-	//    the built-in security group.
+	//    * True - AWS OpsWorks Stacks automatically associates the appropriate
+	//    built-in security group with each layer (default setting). You can associate
+	//    additional security groups with a layer after you create it, but you cannot
+	//    delete the built-in security group.
 	//
-	//    * False - AWS OpsWorks does not associate built-in security groups with
-	//    layers. You must create appropriate EC2 security groups and associate
+	//    * False - AWS OpsWorks Stacks does not associate built-in security groups
+	//    with layers. You must create appropriate EC2 security groups and associate
 	//    a security group with each layer that you create. However, you can still
 	//    manually associate a built-in security group with a layer on creation. Custom security
 	//    groups are required only for those layers that need custom settings.
@@ -16160,9 +16399,9 @@ type UpdateUserProfileInput struct {
 	// The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9],
 	// '-', and '_'. If the specified name includes other punctuation marks, AWS
-	// OpsWorks removes them. For example, my.name will be changed to myname. If
-	// you do not specify an SSH user name, AWS OpsWorks generates one from the
-	// IAM user name.
+	// OpsWorks Stacks removes them. For example, my.name will be changed to myname.
+	// If you do not specify an SSH user name, AWS OpsWorks Stacks generates one
+	// from the IAM user name.
 	SshUsername *string `type:"string"`
 }
@@ -16747,6 +16986,308 @@ const (
 	AutoScalingTypeTimer = "timer"
 )
 
+// Specifies the encoding of the log file so that the file can be read correctly.
+// The default is utf_8. Encodings supported by Python codecs.decode() can be
+// used here.
+const ( + // CloudWatchLogsEncodingAscii is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingAscii = "ascii" + + // CloudWatchLogsEncodingBig5 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingBig5 = "big5" + + // CloudWatchLogsEncodingBig5hkscs is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingBig5hkscs = "big5hkscs" + + // CloudWatchLogsEncodingCp037 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp037 = "cp037" + + // CloudWatchLogsEncodingCp424 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp424 = "cp424" + + // CloudWatchLogsEncodingCp437 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp437 = "cp437" + + // CloudWatchLogsEncodingCp500 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp500 = "cp500" + + // CloudWatchLogsEncodingCp720 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp720 = "cp720" + + // CloudWatchLogsEncodingCp737 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp737 = "cp737" + + // CloudWatchLogsEncodingCp775 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp775 = "cp775" + + // CloudWatchLogsEncodingCp850 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp850 = "cp850" + + // CloudWatchLogsEncodingCp852 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp852 = "cp852" + + // CloudWatchLogsEncodingCp855 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp855 = "cp855" + + // CloudWatchLogsEncodingCp856 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp856 = "cp856" + + // CloudWatchLogsEncodingCp857 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp857 = "cp857" + + // CloudWatchLogsEncodingCp858 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp858 = "cp858" + + // CloudWatchLogsEncodingCp860 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp860 = "cp860" + + // CloudWatchLogsEncodingCp861 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp861 = "cp861" + + // CloudWatchLogsEncodingCp862 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp862 = "cp862" + + // CloudWatchLogsEncodingCp863 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp863 = "cp863" + + // CloudWatchLogsEncodingCp864 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp864 = "cp864" + + // CloudWatchLogsEncodingCp865 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp865 = "cp865" + + // CloudWatchLogsEncodingCp866 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp866 = "cp866" + + // CloudWatchLogsEncodingCp869 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp869 = "cp869" + + // CloudWatchLogsEncodingCp874 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp874 = "cp874" + + // CloudWatchLogsEncodingCp875 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp875 = "cp875" + + // CloudWatchLogsEncodingCp932 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp932 = "cp932" + + // CloudWatchLogsEncodingCp949 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp949 = "cp949" + + // CloudWatchLogsEncodingCp950 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp950 = "cp950" + + // CloudWatchLogsEncodingCp1006 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1006 = "cp1006" + + // CloudWatchLogsEncodingCp1026 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1026 = 
"cp1026" + + // CloudWatchLogsEncodingCp1140 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1140 = "cp1140" + + // CloudWatchLogsEncodingCp1250 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1250 = "cp1250" + + // CloudWatchLogsEncodingCp1251 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1251 = "cp1251" + + // CloudWatchLogsEncodingCp1252 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1252 = "cp1252" + + // CloudWatchLogsEncodingCp1253 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1253 = "cp1253" + + // CloudWatchLogsEncodingCp1254 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1254 = "cp1254" + + // CloudWatchLogsEncodingCp1255 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1255 = "cp1255" + + // CloudWatchLogsEncodingCp1256 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1256 = "cp1256" + + // CloudWatchLogsEncodingCp1257 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1257 = "cp1257" + + // CloudWatchLogsEncodingCp1258 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingCp1258 = "cp1258" + + // CloudWatchLogsEncodingEucJp is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingEucJp = "euc_jp" + + // CloudWatchLogsEncodingEucJis2004 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingEucJis2004 = "euc_jis_2004" + + // CloudWatchLogsEncodingEucJisx0213 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingEucJisx0213 = "euc_jisx0213" + + // CloudWatchLogsEncodingEucKr is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingEucKr = "euc_kr" + + // CloudWatchLogsEncodingGb2312 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingGb2312 = "gb2312" + + // CloudWatchLogsEncodingGbk is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingGbk = "gbk" + + // CloudWatchLogsEncodingGb18030 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingGb18030 = "gb18030" + + // CloudWatchLogsEncodingHz is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingHz = "hz" + + // CloudWatchLogsEncodingIso2022Jp is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso2022Jp = "iso2022_jp" + + // CloudWatchLogsEncodingIso2022Jp1 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso2022Jp1 = "iso2022_jp_1" + + // CloudWatchLogsEncodingIso2022Jp2 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso2022Jp2 = "iso2022_jp_2" + + // CloudWatchLogsEncodingIso2022Jp2004 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso2022Jp2004 = "iso2022_jp_2004" + + // CloudWatchLogsEncodingIso2022Jp3 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso2022Jp3 = "iso2022_jp_3" + + // CloudWatchLogsEncodingIso2022JpExt is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso2022JpExt = "iso2022_jp_ext" + + // CloudWatchLogsEncodingIso2022Kr is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso2022Kr = "iso2022_kr" + + // CloudWatchLogsEncodingLatin1 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingLatin1 = "latin_1" + + // CloudWatchLogsEncodingIso88592 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso88592 = "iso8859_2" + + // CloudWatchLogsEncodingIso88593 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso88593 = "iso8859_3" + + // CloudWatchLogsEncodingIso88594 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso88594 = "iso8859_4" + + // 
CloudWatchLogsEncodingIso88595 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso88595 = "iso8859_5" + + // CloudWatchLogsEncodingIso88596 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso88596 = "iso8859_6" + + // CloudWatchLogsEncodingIso88597 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso88597 = "iso8859_7" + + // CloudWatchLogsEncodingIso88598 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso88598 = "iso8859_8" + + // CloudWatchLogsEncodingIso88599 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso88599 = "iso8859_9" + + // CloudWatchLogsEncodingIso885910 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso885910 = "iso8859_10" + + // CloudWatchLogsEncodingIso885913 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso885913 = "iso8859_13" + + // CloudWatchLogsEncodingIso885914 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso885914 = "iso8859_14" + + // CloudWatchLogsEncodingIso885915 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso885915 = "iso8859_15" + + // CloudWatchLogsEncodingIso885916 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingIso885916 = "iso8859_16" + + // CloudWatchLogsEncodingJohab is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingJohab = "johab" + + // CloudWatchLogsEncodingKoi8R is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingKoi8R = "koi8_r" + + // CloudWatchLogsEncodingKoi8U is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingKoi8U = "koi8_u" + + // CloudWatchLogsEncodingMacCyrillic is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingMacCyrillic = "mac_cyrillic" + + // CloudWatchLogsEncodingMacGreek is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingMacGreek = "mac_greek" + + // CloudWatchLogsEncodingMacIceland is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingMacIceland = "mac_iceland" + + // CloudWatchLogsEncodingMacLatin2 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingMacLatin2 = "mac_latin2" + + // CloudWatchLogsEncodingMacRoman is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingMacRoman = "mac_roman" + + // CloudWatchLogsEncodingMacTurkish is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingMacTurkish = "mac_turkish" + + // CloudWatchLogsEncodingPtcp154 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingPtcp154 = "ptcp154" + + // CloudWatchLogsEncodingShiftJis is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingShiftJis = "shift_jis" + + // CloudWatchLogsEncodingShiftJis2004 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingShiftJis2004 = "shift_jis_2004" + + // CloudWatchLogsEncodingShiftJisx0213 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingShiftJisx0213 = "shift_jisx0213" + + // CloudWatchLogsEncodingUtf32 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingUtf32 = "utf_32" + + // CloudWatchLogsEncodingUtf32Be is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingUtf32Be = "utf_32_be" + + // CloudWatchLogsEncodingUtf32Le is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingUtf32Le = "utf_32_le" + + // CloudWatchLogsEncodingUtf16 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingUtf16 = "utf_16" + + // CloudWatchLogsEncodingUtf16Be is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingUtf16Be = "utf_16_be" + + // CloudWatchLogsEncodingUtf16Le is a CloudWatchLogsEncoding enum value + 
CloudWatchLogsEncodingUtf16Le = "utf_16_le" + + // CloudWatchLogsEncodingUtf7 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingUtf7 = "utf_7" + + // CloudWatchLogsEncodingUtf8 is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingUtf8 = "utf_8" + + // CloudWatchLogsEncodingUtf8Sig is a CloudWatchLogsEncoding enum value + CloudWatchLogsEncodingUtf8Sig = "utf_8_sig" +) + +// Specifies where to start to read data (start_of_file or end_of_file). The +// default is start_of_file. It's only used if there is no state persisted for +// that log stream. +const ( + // CloudWatchLogsInitialPositionStartOfFile is a CloudWatchLogsInitialPosition enum value + CloudWatchLogsInitialPositionStartOfFile = "start_of_file" + + // CloudWatchLogsInitialPositionEndOfFile is a CloudWatchLogsInitialPosition enum value + CloudWatchLogsInitialPositionEndOfFile = "end_of_file" +) + +// The preferred time zone for logs streamed to CloudWatch Logs. Valid values +// are LOCAL and UTC, for Coordinated Universal Time. +const ( + // CloudWatchLogsTimeZoneLocal is a CloudWatchLogsTimeZone enum value + CloudWatchLogsTimeZoneLocal = "LOCAL" + + // CloudWatchLogsTimeZoneUtc is a CloudWatchLogsTimeZone enum value + CloudWatchLogsTimeZoneUtc = "UTC" +) + const ( // DeploymentCommandNameInstallDependencies is a DeploymentCommandName enum value DeploymentCommandNameInstallDependencies = "install_dependencies" diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go index 60fa2c02e..440f0f720 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go @@ -11,20 +11,20 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) -// Welcome to the AWS OpsWorks API Reference. This guide provides descriptions, -// syntax, and usage examples for AWS OpsWorks actions and data types, including -// common parameters and error codes. +// Welcome to the AWS OpsWorks Stacks API Reference. This guide provides descriptions, +// syntax, and usage examples for AWS OpsWorks Stacks actions and data types, +// including common parameters and error codes. // -// AWS OpsWorks is an application management service that provides an integrated -// experience for overseeing the complete application lifecycle. For information -// about this product, go to the AWS OpsWorks (http://aws.amazon.com/opsworks/) +// AWS OpsWorks Stacks is an application management service that provides an +// integrated experience for overseeing the complete application lifecycle. +// For information about this product, go to the AWS OpsWorks (http://aws.amazon.com/opsworks/) // details page. // // SDKs and CLI // -// The most common way to use the AWS OpsWorks API is by using the AWS Command -// Line Interface (CLI) or by using one of the AWS SDKs to implement applications -// in your preferred language. For more information, see: +// The most common way to use the AWS OpsWorks Stacks API is by using the AWS +// Command Line Interface (CLI) or by using one of the AWS SDKs to implement +// applications in your preferred language. For more information, see: // // * AWS CLI (http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) // @@ -42,18 +42,22 @@ import ( // // Endpoints // -// AWS OpsWorks supports the following endpoints, all HTTPS. You must connect -// to one of the following endpoints. 
Stacks can only be accessed or managed
-// within the endpoint in which they are created.
+// AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You must
+// connect to one of the following endpoints. Stacks can only be accessed or
+// managed within the endpoint in which they are created.
 //
 //    * opsworks.us-east-1.amazonaws.com
 //
+//    * opsworks.us-east-2.amazonaws.com
+//
 //    * opsworks.us-west-1.amazonaws.com
 //
 //    * opsworks.us-west-2.amazonaws.com
 //
 //    * opsworks.eu-west-1.amazonaws.com
 //
+//    * opsworks.eu-west-2.amazonaws.com
+//
 //    * opsworks.eu-central-1.amazonaws.com
 //
 //    * opsworks.ap-northeast-1.amazonaws.com
diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go
index cf050546d..606175591 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go
@@ -4471,6 +4471,10 @@ func (c *Redshift) DescribeReservedNodeOfferingsRequest(input *DescribeReservedN
 //   * ErrCodeUnsupportedOperationFault "UnsupportedOperation"
 //   The requested operation isn't supported.
 //
+//   * ErrCodeDependentServiceUnavailableFault "DependentServiceUnavailableFault"
+//   Your request cannot be completed because a dependent internal service is
+//   temporarily unavailable. Wait 30 to 60 seconds and try again.
+//
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DescribeReservedNodeOfferings
 func (c *Redshift) DescribeReservedNodeOfferings(input *DescribeReservedNodeOfferingsInput) (*DescribeReservedNodeOfferingsOutput, error) {
 	req, out := c.DescribeReservedNodeOfferingsRequest(input)
@@ -4607,6 +4611,10 @@ func (c *Redshift) DescribeReservedNodesRequest(input *DescribeReservedNodesInpu
 //   * ErrCodeReservedNodeNotFoundFault "ReservedNodeNotFound"
 //   The specified reserved compute node was not found.
 //
+//   * ErrCodeDependentServiceUnavailableFault "DependentServiceUnavailableFault"
+//   Your request cannot be completed because a dependent internal service is
+//   temporarily unavailable. Wait 30 to 60 seconds and try again.
+//
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DescribeReservedNodes
 func (c *Redshift) DescribeReservedNodes(input *DescribeReservedNodesInput) (*DescribeReservedNodesOutput, error) {
 	req, out := c.DescribeReservedNodesRequest(input)
@@ -5431,6 +5439,111 @@ func (c *Redshift) EnableSnapshotCopyWithContext(ctx aws.Context, input *EnableS
 	return out, req.Send()
 }
 
+const opGetClusterCredentials = "GetClusterCredentials"
+
+// GetClusterCredentialsRequest generates a "aws/request.Request" representing the
+// client's request for the GetClusterCredentials operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetClusterCredentials for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetClusterCredentials method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetClusterCredentialsRequest method.
+// req, resp := client.GetClusterCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/GetClusterCredentials +func (c *Redshift) GetClusterCredentialsRequest(input *GetClusterCredentialsInput) (req *request.Request, output *GetClusterCredentialsOutput) { + op := &request.Operation{ + Name: opGetClusterCredentials, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetClusterCredentialsInput{} + } + + output = &GetClusterCredentialsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetClusterCredentials API operation for Amazon Redshift. +// +// Returns a database user name and temporary password with temporary authorization +// to log in to an Amazon Redshift database. The action returns the database +// user name prefixed with IAM: if AutoCreate is False or IAMA: if AutoCreate +// is True. You can optionally specify one or more database user groups that +// the user will join at log in. By default, the temporary credentials expire +// in 900 seconds. You can optionally specify a duration between 900 seconds +// (15 minutes) and 3600 seconds (60 minutes). For more information, see Generating +// IAM Database User Credentials in the Amazon Redshift Cluster Management Guide. +// +// The IAM user or role that executes GetClusterCredentials must have an IAM +// policy attached that allows the redshift:GetClusterCredentials action with +// access to the dbuser resource on the cluster. The user name specified for +// dbuser in the IAM policy and the user name specified for the DbUser parameter +// must match. +// +// If the DbGroups parameter is specified, the IAM policy must allow the redshift:JoinGroup +// action with access to the listed dbgroups. +// +// In addition, if the AutoCreate parameter is set to True, then the policy +// must include the redshift:CreateClusterUser privilege. +// +// If the DbName parameter is specified, the IAM policy must allow access to +// the resource dbname for the specified database name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Redshift's +// API operation GetClusterCredentials for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterNotFoundFault "ClusterNotFound" +// The ClusterIdentifier parameter does not refer to an existing cluster. +// +// * ErrCodeUnsupportedOperationFault "UnsupportedOperation" +// The requested operation isn't supported. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/GetClusterCredentials +func (c *Redshift) GetClusterCredentials(input *GetClusterCredentialsInput) (*GetClusterCredentialsOutput, error) { + req, out := c.GetClusterCredentialsRequest(input) + return out, req.Send() +} + +// GetClusterCredentialsWithContext is the same as GetClusterCredentials with the addition of +// the ability to pass a context and additional request options. +// +// See GetClusterCredentials for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Redshift) GetClusterCredentialsWithContext(ctx aws.Context, input *GetClusterCredentialsInput, opts ...request.Option) (*GetClusterCredentialsOutput, error) { + req, out := c.GetClusterCredentialsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyCluster = "ModifyCluster" // ModifyClusterRequest generates a "aws/request.Request" representing the @@ -5511,6 +5624,9 @@ func (c *Redshift) ModifyClusterRequest(input *ModifyClusterInput) (req *request // (http://docs.aws.amazon.com/redshift/latest/mgmt/amazon-redshift-limits.html) // in the Amazon Redshift Cluster Management Guide. // +// * ErrCodeNumberOfNodesPerClusterLimitExceededFault "NumberOfNodesPerClusterLimitExceeded" +// The operation would exceed the number of nodes allowed for a cluster. +// // * ErrCodeClusterSecurityGroupNotFoundFault "ClusterSecurityGroupNotFound" // The cluster security group name does not refer to an existing cluster security // group. @@ -6881,6 +6997,10 @@ func (c *Redshift) RotateEncryptionKeyWithContext(ctx aws.Context, input *Rotate type AccountWithRestoreAccess struct { _ struct{} `type:"structure"` + // The identifier of an AWS support account authorized to restore a snapshot. + // For AWS support, the identifier is amazon-redshift-support. + AccountAlias *string `type:"string"` + // The identifier of an AWS customer account authorized to restore a snapshot. AccountId *string `type:"string"` } @@ -6895,6 +7015,12 @@ func (s AccountWithRestoreAccess) GoString() string { return s.String() } +// SetAccountAlias sets the AccountAlias field's value. +func (s *AccountWithRestoreAccess) SetAccountAlias(v string) *AccountWithRestoreAccess { + s.AccountAlias = &v + return s +} + // SetAccountId sets the AccountId field's value. func (s *AccountWithRestoreAccess) SetAccountId(v string) *AccountWithRestoreAccess { s.AccountId = &v @@ -7002,6 +7128,8 @@ type AuthorizeSnapshotAccessInput struct { // The identifier of the AWS customer account authorized to restore the specified // snapshot. // + // To share a snapshot with AWS support, specify amazon-redshift-support. + // // AccountWithRestoreAccess is a required field AccountWithRestoreAccess *string `type:"string" required:"true"` @@ -13484,6 +13612,185 @@ func (s *EventSubscription) SetTags(v []*Tag) *EventSubscription { return s } +// The request parameters to get cluster credentials. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/GetClusterCredentialsMessage +type GetClusterCredentialsInput struct { + _ struct{} `type:"structure"` + + // Create a database user with the name specified for DbUser if one does not + // exist. + AutoCreate *bool `type:"boolean"` + + // The unique identifier of the cluster that contains the database for which + // you are requesting credentials. This parameter is case sensitive. + // + // ClusterIdentifier is a required field + ClusterIdentifier *string `type:"string" required:"true"` + + // A list of the names of existing database groups that DbUser will join for + // the current session. If not specified, the new user is added only to PUBLIC. + DbGroups []*string `locationNameList:"DbGroup" type:"list"` + + // The name of a database that DbUser is authorized to log on to. If DbName + // is not specified, DbUser can log in to any existing database.
+ // +// Constraints: + // + // * Must be 1 to 64 alphanumeric characters or hyphens + // + // * Must contain only lowercase letters. + // + // * Cannot be a reserved word. A list of reserved words can be found in + // Reserved Words (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) + // in the Amazon Redshift Database Developer Guide. + DbName *string `type:"string"` + + // The name of a database user. If a user name matching DbUser exists in the + // database, the temporary user credentials have the same permissions as the + // existing user. If DbUser doesn't exist in the database and Autocreate is + // True, a new user is created using the value for DbUser with PUBLIC permissions. + // If a database user matching the value for DbUser doesn't exist and Autocreate + // is False, then the command succeeds but the connection attempt will fail + // because the user doesn't exist in the database. + // + // For more information, see CREATE USER (http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html) + // in the Amazon Redshift Database Developer Guide. + // + // Constraints: + // + // * Must be 1 to 128 alphanumeric characters or hyphens + // + // * Must contain only lowercase letters. + // + // * First character must be a letter. + // + // * Must not contain a colon ( : ) or slash ( / ). + // + // * Cannot be a reserved word. A list of reserved words can be found in + // Reserved Words (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) + // in the Amazon Redshift Database Developer Guide. + // + // DbUser is a required field + DbUser *string `type:"string" required:"true"` + + // The number of seconds until the returned temporary password expires. + // + // Constraint: minimum 900, maximum 3600. + // + // Default: 900 + DurationSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetClusterCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetClusterCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetClusterCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetClusterCredentialsInput"} + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + if s.DbUser == nil { + invalidParams.Add(request.NewErrParamRequired("DbUser")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoCreate sets the AutoCreate field's value. +func (s *GetClusterCredentialsInput) SetAutoCreate(v bool) *GetClusterCredentialsInput { + s.AutoCreate = &v + return s +} + +// SetClusterIdentifier sets the ClusterIdentifier field's value. +func (s *GetClusterCredentialsInput) SetClusterIdentifier(v string) *GetClusterCredentialsInput { + s.ClusterIdentifier = &v + return s +} + +// SetDbGroups sets the DbGroups field's value. +func (s *GetClusterCredentialsInput) SetDbGroups(v []*string) *GetClusterCredentialsInput { + s.DbGroups = v + return s +} + +// SetDbName sets the DbName field's value. +func (s *GetClusterCredentialsInput) SetDbName(v string) *GetClusterCredentialsInput { + s.DbName = &v + return s +} + +// SetDbUser sets the DbUser field's value.
+func (s *GetClusterCredentialsInput) SetDbUser(v string) *GetClusterCredentialsInput { + s.DbUser = &v + return s +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetClusterCredentialsInput) SetDurationSeconds(v int64) *GetClusterCredentialsInput { + s.DurationSeconds = &v + return s +} + +// Temporary credentials with authorization to log in to an Amazon Redshift +// database. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/ClusterCredentials +type GetClusterCredentialsOutput struct { + _ struct{} `type:"structure"` + + // A temporary password that authorizes the user name returned by DbUser to + // log on to the database DbName. + DbPassword *string `type:"string"` + + // A database user name that is authorized to log on to the database DbName + // using the password DbPassword. If the DbGroups parameter is specified, DbUser + // is added to the listed groups for the current session. The user name is prefixed + // with IAM: for an existing user name or IAMA: if the user was auto-created. + DbUser *string `type:"string"` + + // The date and time DbPassword expires. + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetClusterCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetClusterCredentialsOutput) GoString() string { + return s.String() +} + +// SetDbPassword sets the DbPassword field's value. +func (s *GetClusterCredentialsOutput) SetDbPassword(v string) *GetClusterCredentialsOutput { + s.DbPassword = &v + return s +} + +// SetDbUser sets the DbUser field's value. +func (s *GetClusterCredentialsOutput) SetDbUser(v string) *GetClusterCredentialsOutput { + s.DbUser = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *GetClusterCredentialsOutput) SetExpiration(v time.Time) *GetClusterCredentialsOutput { + s.Expiration = &v + return s +} + // Returns information about an HSM client certificate. The certificate is stored // in a secure Hardware Storage Module (HSM), and used by the Amazon Redshift // cluster to encrypt data files. diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/errors.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/errors.go index 74b4ef87c..8b0570fe3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/redshift/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/errors.go @@ -165,6 +165,13 @@ const ( // requests made by Amazon Redshift on your behalf. Wait and retry the request. ErrCodeDependentServiceRequestThrottlingFault = "DependentServiceRequestThrottlingFault" + // ErrCodeDependentServiceUnavailableFault for service response error code + // "DependentServiceUnavailableFault". + // + // Your request cannot be completed because a dependent internal service is + // temporarily unavailable. Wait 30 to 60 seconds and try again. + ErrCodeDependentServiceUnavailableFault = "DependentServiceUnavailableFault" + // ErrCodeEventSubscriptionQuotaExceededFault for service response error code // "EventSubscriptionQuotaExceeded".
// diff --git a/vendor/vendor.json b/vendor/vendor.json index f006bec37..bf1591a7b 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -495,340 +495,340 @@ "revisionTime": "2017-01-23T00:46:44Z" }, { - "checksumSHA1": "wvNp7Z0aIf9CCLYtzXpcO90YWbg=", + "checksumSHA1": "Km15hcxupg+Fejy1CmPMv1b1Qew=", "path": "github.com/aws/aws-sdk-go", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { - "checksumSHA1": "FQz+RL20lsUYIpT2CNpYeyKn8Lg=", + "checksumSHA1": "jhBCqnseVTWZiSOXrAXWjSmuIOM=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { - "checksumSHA1": "iThCyNRL/oQFD9CF2SYgBGl+aww=", + "checksumSHA1": "iA8gPEZQ0g2lMwf8gfjOVqUiYc4=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "0Gfk83qXYimO87ZoK1lL9+ifWHo=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { - "checksumSHA1": "P7gt3PNk6bDOoTZ2N9QOonkaGWw=", + "checksumSHA1": "WKv1OkJtlhIHUjes6bB3QoWOA7o=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": 
"4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "6cj/zsRmcxkE1TLS+v910GbQYg0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { - "checksumSHA1": "l2O7P/kvovK2zxKhuFehFNXLk+Q=", + "checksumSHA1": "k4IMA27NIDHgZgvBxrKyJy16Y20=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "+yCOae0vRONrO27QiITkGWblOKk=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { - "checksumSHA1": "/L6UweKsmfyHTu01qrFD1ijzSbE=", + "checksumSHA1": "uqNleRWfPXWHwX7ROArYyOuIp0w=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { - "checksumSHA1": "5pzA5afgeU1alfACFh8z2CDUMao=", + "checksumSHA1": "24VtK/Hym9lC8LkZlGLMdFGq+5o=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + 
"version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "SvIsunO8D9MEKbetMENA4WRnyeE=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "O6hcK24yI6w7FA+g4Pbr+eQ7pys=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "Drt1JfLMa0DQEZLWrnMlTWaIcC8=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "VCTh+dEaqqhog5ncy/WTt9+/gFM=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": 
"d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "lZ1z4xAbT8euCzKoAsnEYic60VE=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "ZmojxECvjM6BeI752BPyZAmOhlo=", "path": "github.com/aws/aws-sdk-go/service/acm", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { - "checksumSHA1": "H3h5AMX7c9oT50oovfJIfmkvoBg=", + "checksumSHA1": "PZpt6OZ+8tE7sPXLSVFZpyKbNOA=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "3ykAVetHFs9T3YivIPvRyiNFdys=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "/d8U22aF2+qYhWYscPzClHTDCP4=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "n6v4S6jPpkHsS59Oj1EZPQIdRNg=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": 
"4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "QLEaEFA3V4n+ohwENEoWV+AXBj4=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "Vh3PtQEwIUabpoE7PsCZItUZuVc=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "aGx2atOHEXSowjXUQ3UoJ/t2LSI=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "Ez3+aU0QGRe4isLDFQuHNRyF3zA=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "+AjVMO3KUY7Wkh0vHRnJqRG8kGc=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "uTt6pA8eB+udA7tC8ElLbr2eeK4=", "path": "github.com/aws/aws-sdk-go/service/codebuild", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "sqppuUIMPMBOnTRVR4BhHAoaTrY=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "u6cK2krOuDqi8gy5V316FvH34t0=", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "fK7MOfX/cV2DJ176+umySuuYh2s=", "path": 
"github.com/aws/aws-sdk-go/service/codepipeline", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "L8F5aJdwCvoNwrP6prtHSdklijM=", @@ -841,298 +841,298 @@ { "checksumSHA1": "gSm1lj0J4klQMw7jHE0fU/RV+4Y=", "path": "github.com/aws/aws-sdk-go/service/configservice", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "SP6m/hn+Hj72wkgaAZ8NM/7s/18=", "path": "github.com/aws/aws-sdk-go/service/databasemigrationservice", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "2Su2xzCbUPbCdVkyWuXcmxAI2Rs=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "Y4Wg7dxPIU3W1dqN3vnpSLA1ChQ=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "2PIG7uhrvvDAjiNZINBVCgW/Uds=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "ClGPl4TLpf457zUeOEWyTvqBRjc=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "c6KWQtc1bRCFs/IuIe/jgZXalBw=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "4mBZS9FSCW73hcjj0CikPqpikag=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - 
"versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "P7GrpZV3eYQASV8Z+DeFuo9zbm4=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "DXs9Zpa2Db2adBjDi/EyFp6913E=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "dv1QkeLjDyUlMQkbnLjm6l0mJHo=", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "ir6xGAYAwIdWKgk7BVHNQWvlA/g=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "sdFllfq+lllwyk0yMFmWzg+qs9Y=", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "oJQzYnuAHAhKAtAuinSPEeDsXoU=", "path": "github.com/aws/aws-sdk-go/service/elbv2", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "tLfj5mQiTOOhWdeU6hL5PYRAEP0=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "Yy7CkVZR1/vrcdMPWJmQMC2i5hk=", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "tuoOAm2gCN2txnIq1jKbCHqeQQM=", "path": 
"github.com/aws/aws-sdk-go/service/glacier", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "NoG5QpuGo3iLNk6DwwWsDCogfGY=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "5ElupFtEcDvKa1yXTh6nR9HijMU=", "path": "github.com/aws/aws-sdk-go/service/inspector", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "Yzxk0tkTh2D9JP5I8gspLQLKu0U=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "zeEh/FDxM81fU3X2ftWU2Z++iQg=", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { - "checksumSHA1": "bHA5BLaVmAq8G5R40tv/X3HF5J0=", + "checksumSHA1": "H25POIGzyemmnJ+06HoAziXxW4I=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "GFXjkh1wWzohbefi1k0N+zbkmU4=", "path": "github.com/aws/aws-sdk-go/service/lightsail", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { - "checksumSHA1": "AB2pSc+tsnoNxFg0fSMDn7rFZbM=", + "checksumSHA1": "SqXsYVwBsvHwXRd2VAb5Us9F6Vw=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "5Br7nJBgOm6y67Z95CGZtOaxlFY=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - 
"versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { - "checksumSHA1": "COvVop5UbeJ4P0cMu+0ekubPLtE=", + "checksumSHA1": "TIYqqHM4J5j5tWZR+FLpRpQzz7A=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "e/lUvi2TAO9hms6HOzpX61exefw=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "o7qpn0kxj43Ej/RwfCb9JbzfbfQ=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "/2UKYWNc/LRv+M/LQRpJqukcXzc=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "eUrUJOZg3sQHWyYKPRPO9OeN+a4=", "path": "github.com/aws/aws-sdk-go/service/sfn", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "CVWvzoJ3YBvEI8TdQWlqUxOt9lk=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "bJ8g3OhBAkxM+QaFrQCD0L0eWY8=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "jzKBnso2Psx3CyS+0VR1BzvuccU=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "GPD+dDmDtseJFG8lB8aU58aszDg=", "path": 
"github.com/aws/aws-sdk-go/service/ssm", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "SdsHiTUR9eRarThv/i7y6/rVyF4=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "w3+CyiPRk1WUFFmueIRZkgQuHH0=", "path": "github.com/aws/aws-sdk-go/service/waf", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "QgNbH3Mxe4jiu3IN+vPAnz/IWbw=", "path": "github.com/aws/aws-sdk-go/service/wafregional", - "revision": "d643bdf04f2cc6f95424f9f4e69037a563fc0736", - "revisionTime": "2017-04-06T18:01:00Z", - "version": "v1.8.10", - "versionExact": "v1.8.10" + "revision": "4bbd6fa3fdede4c68e941248f974b8951c17de89", + "revisionTime": "2017-04-18T18:52:59Z", + "version": "v1.8.13", + "versionExact": "v1.8.13" }, { "checksumSHA1": "nqw2Qn5xUklssHTubS5HDvEL9L4=", From 1608f5544f1df855511fbc9e54e89f9cd42d1592 Mon Sep 17 00:00:00 2001 From: = Date: Wed, 19 Apr 2017 14:22:35 -0600 Subject: [PATCH 220/342] Added error check --- helper/resource/testing.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/helper/resource/testing.go b/helper/resource/testing.go index 5f08c2a3f..04367c53c 100644 --- a/helper/resource/testing.go +++ b/helper/resource/testing.go @@ -367,7 +367,10 @@ func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFac for k, pf := range ctxProviders { // we can ignore any errors here, if we don't have a provider to reset // the error will be handled later - p, _ := pf() + p, err := pf() + if err != nil { + return nil, err + } if p, ok := p.(TestProvider); ok { err := p.TestReset() if err != nil { From 3d315baee56a9ff22285e79cab38ab8340e376af Mon Sep 17 00:00:00 2001 From: Mary Elizabeth Cutrali Date: Wed, 19 Apr 2017 17:30:58 -0400 Subject: [PATCH 221/342] update spelling of Bitbucket --- .../docs/enterprise/vcs/bitbucket.html.md | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/website/source/docs/enterprise/vcs/bitbucket.html.md b/website/source/docs/enterprise/vcs/bitbucket.html.md index fb0cf911e..1d02a69b5 100644 --- a/website/source/docs/enterprise/vcs/bitbucket.html.md +++ b/website/source/docs/enterprise/vcs/bitbucket.html.md @@ -1,13 +1,13 @@ --- layout: "enterprise" -page_title: "BitBucket Cloud - VCS Integrations - Terraform Enterprise" +page_title: "Bitbucket Cloud - VCS Integrations - Terraform Enterprise" sidebar_current: "docs-enterprise-vcs-bitbucket-cloud" description: |- - BitBucket Cloud repositories can be integrated with Terraform Enterprise by using push command. + Bitbucket Cloud repositories can be integrated with Terraform Enterprise by using push command. 
--- -# BitBucket Cloud +# Bitbucket Cloud -BitBucket Cloud can be used to import Terraform configuration, automatically +Bitbucket Cloud can be used to import Terraform configuration, automatically queuing runs when changes are merged into a repository's default branch. Additionally, plans are run when a pull request is created or updated. Terraform Enterprise will update the pull request with the result of the Terraform plan @@ -15,7 +15,7 @@ providing quick feedback on proposed changes. ## Registering an OAuth Application & Client -### Creating and Updating a BitBucket Cloud OAuth Application +### Creating and Updating a Bitbucket Cloud OAuth Application You will need to register Terraform Enterprise as an OAuth Application within your Bitbucket Cloud account. Proceed to https://bitbucket.org/account/user/your-username/oauth-consumers/new. Fill out the required information and set the Redirect URI to a placeholder (ie: http://example.com), as you will need to register the Bitbucket Client with Terraform Enterprise prior to receiving this value. Check all of the permission fields that apply to you, and click Save @@ -31,7 +31,7 @@ Once you have created your client, you will be redirected back to the configurat Your OAuth Client should now be enabled for your Organization to use within Terraform Enterprise. -## Using Terraform Enterprise with BitBucket Cloud +## Using Terraform Enterprise with Bitbucket Cloud There are two ways to connect your preferred VCS Host to Terraform Enterprise. You can generate an OAuth token both at the user and organization level. @@ -52,9 +52,9 @@ You are now ready to use your personal token to manage builds and configurations ## Connecting Configurations -Once you have linked a BitBucket installation to your account or organization, +Once you have linked a Bitbucket installation to your account or organization, you are ready to begin creating Packer Builds and Terraform Enviroments linked -to your desired BitBucket Cloud repository. +to your desired Bitbucket Cloud repository. Terraform Enterprise environments are linked to individual GitHub repositories. However, a single GitHub repository can be linked to multiple environments @@ -65,19 +65,19 @@ Environments can be linked when they're initially created using the New Environment process. Existing environments can be linked by setting GitHub details in their **Integrations**. -To link a Terraform Enterprise environment to a BitBucket Cloud repository, you need +To link a Terraform Enterprise environment to a Bitbucket Cloud repository, you need three pieces of information: -- **BitBucket Cloud repository** - The location of the repository being imported in the +- **Bitbucket Cloud repository** - The location of the repository being imported in the format _username/repository_. -- **BitBucket Cloud branch** - The branch from which to ingress new versions. This +- **Bitbucket Cloud branch** - The branch from which to ingress new versions. This defaults to the value GitHub provides as the default branch for this repository. - **Path to directory of Terraform files** - The repository's subdirectory that contains its terraform files. This defaults to the root of the repository. -### Connecting a BitBucket Cloud Repository to a Terraform Environment +### Connecting a Bitbucket Cloud Repository to a Terraform Environment Navigate to https://atlas.hashicorp.com/configurations/import and select Link to Bitbucket Cloud. A menu will appear asking you to name the environment. 
Then use the autocomplete field for repository and select the repository for which you'd like to create a webhook & environment. If necessary, fill out information about the VCS branch to pull from as well as the directory where the Terraform files live within the repository. Click Create and Continue. @@ -85,7 +85,7 @@ Upon success, you will be redirected to the environment's runs page (https://atl The events currently supported are repository and branch push, pull request, and merge. -### Connecting a BitBucket Cloud Repository to a Packer Build Configuration +### Connecting a Bitbucket Cloud Repository to a Packer Build Configuration Navigate to https://atlas.hashicorp.com/builds/new and select the organization for which you'd like to create a build configuration. Name your build & select Connect build configuration to a Git Repository. A form will appear asking you to select your Git Host. Select Bitbucket Cloud. From d157eea2e3add1bb47cb410528e9298f1e8d5c6e Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 19 Apr 2017 14:56:11 -0700 Subject: [PATCH 222/342] website: Correct capitalization of "Bitbucket" Previously we fixed this specifically for the Enterprise VCS integration, but we also had some long-running errors of this sort in the docs for how to specify module sources on Bitbucket. --- website/source/docs/modules/sources.html.markdown | 8 ++++---- website/source/layouts/enterprise.erb | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/website/source/docs/modules/sources.html.markdown b/website/source/docs/modules/sources.html.markdown index 241f76310..b30cab033 100644 --- a/website/source/docs/modules/sources.html.markdown +++ b/website/source/docs/modules/sources.html.markdown @@ -17,7 +17,7 @@ Terraform supports the following sources: * GitHub - * BitBucket + * Bitbucket * Generic Git, Mercurial repositories @@ -85,9 +85,9 @@ module "private-infra" { **Note:** Terraform does not yet support interpolations in the `source` field, so the machine username and password will have to be embedded directly into the `source` string. You can track [GH-1439](https://github.com/hashicorp/terraform/issues/1439) to learn when this limitation is addressed. -## BitBucket +## Bitbucket -Terraform will automatically recognize BitBucket URLs and turn them into a link to the specific Git or Mercurial repository, for example: +Terraform will automatically recognize Bitbucket URLs and turn them into a link to the specific Git or Mercurial repository, for example: ```hcl module "consul" { @@ -105,7 +105,7 @@ module "consul" { **Note:** The double-slash, `//`, is important. It is what tells Terraform that this is the separator for a subdirectory, and not part of the repository itself. -BitBucket URLs will require that Git or Mercurial is installed on your system, depending on the type of repository. +Bitbucket URLs will require that Git or Mercurial is installed on your system, depending on the type of repository. ## Generic Git Repository diff --git a/website/source/layouts/enterprise.erb b/website/source/layouts/enterprise.erb index fb764a10f..127074249 100644 --- a/website/source/layouts/enterprise.erb +++ b/website/source/layouts/enterprise.erb @@ -117,7 +117,7 @@ GitLab
  • > - BitBucket + Bitbucket     From af1628eaa4c109653b440b99be27b8fe053c83a8 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 19 Apr 2017 12:14:23 -0400 Subject: [PATCH 223/342] add refreshGracePeriod Refresh calls may have side effects that need to be recorded if they succeed, which is especially common when WaitForState is called from resource.Retry. If the WaitForState timeout is reached and there is a Refresh call in-flight, wait up to refreshGracePeriod (set to 30s) for it to complete. --- helper/resource/state.go | 57 ++++++++++++++++++++++++++++++------ helper/resource/wait_test.go | 15 ++++++++++ 2 files changed, 63 insertions(+), 9 deletions(-) diff --git a/helper/resource/state.go b/helper/resource/state.go index dd5bfd4b6..285926b11 100644 --- a/helper/resource/state.go +++ b/helper/resource/state.go @@ -5,6 +5,8 @@ import ( "time" ) +var refreshGracePeriod = 30 * time.Second + // StateRefreshFunc is a function type used for StateChangeConf that is // responsible for refreshing the item being watched for a state change. // @@ -68,11 +70,13 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { Done bool } - // read ever result from the refresh loop, waiting for a positive result.Done + // Read every result from the refresh loop, waiting for a positive result.Done. resCh := make(chan Result, 1) // cancellation channel for the refresh loop cancelCh := make(chan struct{}) + result := Result{} + go func() { defer close(resCh) @@ -82,6 +86,9 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { var wait time.Duration for { + // store the last result + resCh <- result + // wait and watch for cancellation select { case <-cancelCh: @@ -94,14 +101,14 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { } res, currentState, err := conf.Refresh() - result := Result{ + result = Result{ Result: res, State: currentState, Error: err, } - resCh <- result if err != nil { + resCh <- result return } @@ -167,6 +174,12 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { } } + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } + // If a poll interval has been specified, choose that interval. // Otherwise bound the default value. if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { @@ -177,12 +190,6 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { } else if wait > 10*time.Second { wait = 10 * time.Second } - - // Wait between refreshes using exponential backoff, except when - // waiting for the target state to reoccur. - if targetOccurence == 0 { - wait *= 2 - } } log.Printf("[TRACE] Waiting %s before next try", wait) @@ -210,7 +217,39 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { lastResult = r case <-timeout: + log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) + log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) + + // cancel the goroutine and start our grace period timer close(cancelCh) + timeout := time.After(refreshGracePeriod) + + // we need a for loop and a label to break on, because we may have + // an extra response value to read, but still want to wait for the + // channel to close.
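+ // Note: a final Refresh may still be in flight when the timeout fires; + // draining resCh below preserves its result, so a refresh that reaches + // the target state during the grace period still returns success.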
+ forSelect: + for { + select { + case r, ok := <-resCh: + if r.Done { + // the last refresh loop reached the desired state + return r.Result, r.Error + } + + if !ok { + // the the goroutine returned + break forSelect + } + + // target state not reached, save the result for the + // TimeoutError and wait for the channel to close + lastResult = r + case <-timeout: + log.Println("[ERROR] WaitForState exceeded refresh grace period") + break forSelect + } + } + return nil, &TimeoutError{ LastError: lastResult.Error, LastState: lastResult.State, diff --git a/helper/resource/wait_test.go b/helper/resource/wait_test.go index bb17d9fe4..957bd1842 100644 --- a/helper/resource/wait_test.go +++ b/helper/resource/wait_test.go @@ -25,6 +25,21 @@ func TestRetry(t *testing.T) { } } +// make sure a slow StateRefreshFunc is allowed to complete after timeout +func TestRetry_grace(t *testing.T) { + t.Parallel() + + f := func() *RetryError { + time.Sleep(1 * time.Second) + return nil + } + + err := Retry(10*time.Millisecond, f) + if err != nil { + t.Fatalf("err: %s", err) + } +} + func TestRetry_timeout(t *testing.T) { t.Parallel() From eb4b45941c4c81180f42e30889e6037f05a0d2f4 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 19 Apr 2017 14:19:08 -0400 Subject: [PATCH 224/342] fix tests affected by refreshGracePeriod A couple of tests require lowering the grace period to keep them from taking the full 30s timeout. The Retry_hang test also needed to be removed from the Parallel group, because it modifies the global refreshGracePeriod variable. --- helper/resource/state_test.go | 6 ++++++ helper/resource/wait_test.go | 8 ++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/helper/resource/state_test.go b/helper/resource/state_test.go index 5e0cbe2dc..ca534589c 100644 --- a/helper/resource/state_test.go +++ b/helper/resource/state_test.go @@ -147,6 +147,12 @@ func TestWaitForState_inconsistent_negative(t *testing.T) { } func TestWaitForState_timeout(t *testing.T) { + old := refreshGracePeriod + refreshGracePeriod = 5 * time.Millisecond + defer func() { + refreshGracePeriod = old + }() + conf := &StateChangeConf{ Pending: []string{"pending", "incomplete"}, Target: []string{"running"}, diff --git a/helper/resource/wait_test.go b/helper/resource/wait_test.go index 957bd1842..526b21ae3 100644 --- a/helper/resource/wait_test.go +++ b/helper/resource/wait_test.go @@ -54,14 +54,18 @@ func TestRetry_timeout(t *testing.T) { } func TestRetry_hang(t *testing.T) { - t.Parallel() + old := refreshGracePeriod + refreshGracePeriod = 50 * time.Millisecond + defer func() { + refreshGracePeriod = old + }() f := func() *RetryError { time.Sleep(2 * time.Second) return nil } - err := Retry(1*time.Second, f) + err := Retry(50*time.Millisecond, f) if err == nil { t.Fatal("should error") } From 14bea66f4ee3c09dff772beb407343f4fa11f5db Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 19 Apr 2017 18:06:41 -0400 Subject: [PATCH 225/342] add test for proper cancelation --- helper/resource/state_test.go | 56 +++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/helper/resource/state_test.go b/helper/resource/state_test.go index ca534589c..6d6b329a1 100644 --- a/helper/resource/state_test.go +++ b/helper/resource/state_test.go @@ -171,6 +171,62 @@ func TestWaitForState_timeout(t *testing.T) { t.Fatalf("Errors don't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error()) } + if obj != nil { + t.Fatalf("should not return obj") + } +} + +// Make sure a timeout actually cancels the
+// return.
+func TestWaitForState_cancel(t *testing.T) {
+	// make this refresh func block until we cancel it
+	cancel := make(chan struct{})
+	refresh := func() (interface{}, string, error) {
+		<-cancel
+		return nil, "pending", nil
+	}
+	conf := &StateChangeConf{
+		Pending:      []string{"pending", "incomplete"},
+		Target:       []string{"running"},
+		Refresh:      refresh,
+		Timeout:      10 * time.Millisecond,
+		PollInterval: 10 * time.Second,
+	}
+
+	var obj interface{}
+	var err error
+
+	waitDone := make(chan struct{})
+	go func() {
+		defer close(waitDone)
+		obj, err = conf.WaitForState()
+	}()
+
+	// make sure WaitForState is blocked
+	select {
+	case <-waitDone:
+		t.Fatal("WaitForState returned too early")
+	case <-time.After(10 * time.Millisecond):
+	}
+
+	// unlock the refresh function
+	close(cancel)
+	// make sure WaitForState returns
+	select {
+	case <-waitDone:
+	case <-time.After(time.Second):
+		t.Fatal("WaitForState didn't return after refresh finished")
+	}
+
+	if err == nil {
+		t.Fatal("Expected timeout error. No error returned.")
+	}
+
+	expectedErr := "timeout while waiting for state to become 'running'"
+	if !strings.HasPrefix(err.Error(), expectedErr) {
+		t.Fatalf("Errors don't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error())
+	}
+
+	if obj != nil {
+		t.Fatalf("should not return obj")
+	}
From 4c3a053f0cf59ac9d7ab15cbcfbee50f788fde55 Mon Sep 17 00:00:00 2001
From: James Bardin
Date: Wed, 19 Apr 2017 18:19:48 -0400
Subject: [PATCH 226/342] lint errors

---
 helper/resource/state.go | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/helper/resource/state.go b/helper/resource/state.go
index 285926b11..37c586a11 100644
--- a/helper/resource/state.go
+++ b/helper/resource/state.go
@@ -114,20 +114,19 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) {
 
 		// If we're waiting for the absence of a thing, then return
 		if res == nil && len(conf.Target) == 0 {
-			targetOccurence += 1
+			targetOccurence++
 			if conf.ContinuousTargetOccurence == targetOccurence {
 				result.Done = true
 				resCh <- result
 				return
-			} else {
-				continue
 			}
+			continue
 		}
 
 		if res == nil {
 			// If we didn't find the resource, check if we have been
 			// not finding it for awhile, and if so, report an error.
-			notfoundTick += 1
+			notfoundTick++
 			if notfoundTick > conf.NotFoundChecks {
 				result.Error = &NotFoundError{
 					LastError: err,
@@ -144,14 +143,13 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) {
 			for _, allowed := range conf.Target {
 				if currentState == allowed {
 					found = true
-					targetOccurence += 1
+					targetOccurence++
 					if conf.ContinuousTargetOccurence == targetOccurence {
 						result.Done = true
 						resCh <- result
 						return
-					} else {
-						continue
 					}
+					continue
 				}
 			}
@@ -237,7 +235,7 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) {
 			}
 
 			if !ok {
-				// the the goroutine returned
+				// the goroutine returned
 				break forSelect
 			}
From 7f3c8e47653e0eda093e525d05897b84bec9137c Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 19 Apr 2017 17:20:44 -0500
Subject: [PATCH 227/342] move this test to new region

---
 builtin/providers/aws/data_source_aws_route53_zone_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/providers/aws/data_source_aws_route53_zone_test.go b/builtin/providers/aws/data_source_aws_route53_zone_test.go
index 49c684f29..0004787bf 100644
--- a/builtin/providers/aws/data_source_aws_route53_zone_test.go
+++ b/builtin/providers/aws/data_source_aws_route53_zone_test.go
@@ -72,7 +72,7 @@ func testAccDataSourceAwsRoute53ZoneCheck(rsName, dsName, zName string) resource
 func testAccDataSourceAwsRoute53ZoneConfig(rInt int) string {
 	return fmt.Sprintf(`
 provider "aws" {
-  region = "us-east-2"
+  region = "us-east-1"
 }
 
 resource "aws_vpc" "test" {
From 99a73094f7d6ee1749663219ee85dddddc3cd3ab Mon Sep 17 00:00:00 2001
From: dj80hd
Date: Thu, 20 Apr 2017 00:47:44 -0500
Subject: [PATCH 228/342] Fix typo in aws-two-tier example (#13790)

---
 examples/aws-two-tier/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/aws-two-tier/README.md b/examples/aws-two-tier/README.md
index 3c0e54bc0..3920a15f4 100644
--- a/examples/aws-two-tier/README.md
+++ b/examples/aws-two-tier/README.md
@@ -26,7 +26,7 @@ Run with a command like this:
 ```
 terraform apply -var 'key_name={your_aws_key_name}' \
-  -var 'public_key_path={location_of_your_key_in_your_local_machine}'`
+  -var 'public_key_path={location_of_your_key_in_your_local_machine}'
 ```
 
 For example:
From 8a701086120f160879f29800dc2c25d4ad389a12 Mon Sep 17 00:00:00 2001
From: Daniel Kats
Date: Wed, 19 Apr 2017 22:48:58 -0700
Subject: [PATCH 229/342] fix a word in aws_iam_role_policy error msg (#13794)

---
 builtin/providers/aws/resource_aws_iam_role_policy.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/providers/aws/resource_aws_iam_role_policy.go b/builtin/providers/aws/resource_aws_iam_role_policy.go
index 614d8d6fb..85f4a6bc9 100644
--- a/builtin/providers/aws/resource_aws_iam_role_policy.go
+++ b/builtin/providers/aws/resource_aws_iam_role_policy.go
@@ -140,7 +140,7 @@ func resourceAwsIamRolePolicyDelete(d *schema.ResourceData, meta interface{}) er
 func resourceAwsIamRolePolicyParseId(id string) (roleName, policyName string, err error) {
 	parts := strings.SplitN(id, ":", 2)
 	if len(parts) != 2 {
-		err = fmt.Errorf("role_policy id must be of the for <role name>:<policy name>")
+		err = fmt.Errorf("role_policy id must be of the form <role name>:<policy name>")
 		return
 	}
From 8706d2181f695ea919904291d0ac2c4c1788e417 Mon Sep 17 00:00:00 2001
From: tombuildsstuff
Date: Thu, 20 Apr 2017 12:19:35 +0100
Subject: [PATCH 230/342] Fixing the import test by adding missing formatting values

---
 builtin/providers/azurerm/import_arm_subnet_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/providers/azurerm/import_arm_subnet_test.go b/builtin/providers/azurerm/import_arm_subnet_test.go
index 90cf28fe5..8384d8b3f 100644
--- a/builtin/providers/azurerm/import_arm_subnet_test.go
+++ b/builtin/providers/azurerm/import_arm_subnet_test.go
@@ -12,7 +12,7 @@ func TestAccAzureRMSubnet_importBasic(t *testing.T) {
 	resourceName := "azurerm_subnet.test"
 	ri := acctest.RandInt()
-	config := fmt.Sprintf(testAccAzureRMSubnet_basic, ri, ri, ri)
+	config := fmt.Sprintf(testAccAzureRMSubnet_basic, ri, ri, ri, ri, ri)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck: func() { testAccPreCheck(t) },
From 30b6d695a675fbccace7adf5da9b6b01d3c29004 Mon Sep 17 00:00:00 2001
From: Tom Harvey
Date: Thu, 20 Apr 2017 12:41:07 +0100
Subject: [PATCH 231/342] Updating to include #13791

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3ed4b4a29..d600254ba 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -41,6 +41,7 @@ BUG FIXES:
  * provider/aws: Allow GovCloud KMS ARNs to pass validation in `kms_key_id` attributes [GH-13699]
  * provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650]
  * provider/azurerm: Locking around Network Security Group / Subnets [GH-13637]
+ * provider/azurerm: Locking route table on subnet create/delete [GH-13791]
  * provider/fastly: Fix issue with using 0 for `default_ttl` [GH-13648]
  * provider/google: Stop setting the id when project creation fails [GH-13644]
  * provider/newrelic: newrelic_alert_condition - `condition_scope` must be `application` or `instance` [GH-12972]
From 72f2a3129829b265af2e038c1f83e1f27574e2f5 Mon Sep 17 00:00:00 2001
From: Tom Harvey
Date: Thu, 20 Apr 2017 14:05:19 +0100
Subject: [PATCH 232/342] Including #13755 in the changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d600254ba..1b43d3dc2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -42,6 +42,7 @@ BUG FIXES:
  * provider/azurerm: azurerm_redis_cache resource missing hostname [GH-13650]
  * provider/azurerm: Locking around Network Security Group / Subnets [GH-13637]
  * provider/azurerm: Locking route table on subnet create/delete [GH-13791]
+ * provider/azurerm: VM's - fixes a bug where ssh_keys could contain a null entry [GH-13755]
  * provider/fastly: Fix issue with using 0 for `default_ttl` [GH-13648]
  * provider/google: Stop setting the id when project creation fails [GH-13644]
  * provider/newrelic: newrelic_alert_condition - `condition_scope` must be `application` or `instance` [GH-12972]
From e667411cc5cb5ec0445602c7d135ffbfd068be7e Mon Sep 17 00:00:00 2001
From: Jearvon Dharrie
Date: Thu, 20 Apr 2017 12:36:34 -0400
Subject: [PATCH 233/342] website: Fix spacing of literal "terraform console" (#13807)

---
 website/source/docs/commands/console.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/source/docs/commands/console.html.markdown b/website/source/docs/commands/console.html.markdown
index e6af2e780..e8e7778e8 100644
--- a/website/source/docs/commands/console.html.markdown
+++ b/website/source/docs/commands/console.html.markdown
@@ -52,7 +52,7 @@ $ echo "1 + 5" | terraform console
 
 ## Remote State
 
-The `terraform console `command will read configured state even if it
+The `terraform console` command will read configured state even if it
 is [remote](/docs/state/remote.html). This is great for scripting state
 reading in CI environments or other remote scenarios.
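For context on the WaitForState grace-period work above (patches 224-226 and the state.go hunk before them): providers drive this API through a StateChangeConf, polling a StateRefreshFunc until a target state is reached. Below is a minimal sketch of that calling pattern; the three-poll fake refresh function is an assumption for illustration, not code from this series.

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	// Hypothetical refresh function: reports "pending" for the first two
	// polls, then "running". A real provider would query its API here.
	polls := 0
	refresh := func() (interface{}, string, error) {
		polls++
		if polls < 3 {
			return "instance", "pending", nil
		}
		return "instance", "running", nil
	}

	conf := &resource.StateChangeConf{
		Pending: []string{"pending"},
		Target:  []string{"running"},
		Refresh: refresh,
		Timeout: 30 * time.Second,
	}

	// WaitForState polls Refresh until a Target state is reached or
	// Timeout expires.
	obj, err := conf.WaitForState()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("reached target state: %v\n", obj)
}
```

With the grace period added above, a Refresh call that is still in flight when Timeout fires is given until refreshGracePeriod elapses (30s, per patch 224's message) to return before WaitForState gives up with a TimeoutError.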
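The next patch vendors google.golang.org/api/bigquery/v2 and builds the google_bigquery_dataset resource on it. For orientation, here is a minimal standalone sketch of the same client calls the resource's Create path makes; the project and dataset IDs are placeholders, and authentication via Application Default Credentials is an assumption.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	"google.golang.org/api/bigquery/v2"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials; assumes GOOGLE_APPLICATION_CREDENTIALS
	// or an equivalent ambient credential source is configured.
	client, err := google.DefaultClient(ctx, bigquery.BigqueryScope)
	if err != nil {
		log.Fatal(err)
	}

	svc, err := bigquery.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Mirrors resourceBigQueryDatasetCreate: build a Dataset around its
	// required DatasetReference and insert it into the project.
	ds := &bigquery.Dataset{
		DatasetReference: &bigquery.DatasetReference{
			ProjectId: "my-project",      // placeholder
			DatasetId: "example_dataset", // placeholder
		},
		FriendlyName:             "example",
		Description:              "created via the vendored bigquery/v2 client",
		Location:                 "US",
		DefaultTableExpirationMs: 3600000, // minimum the schema validation allows: one hour
	}

	res, err := svc.Datasets.Insert("my-project", ds).Do()
	if err != nil {
		log.Fatal(err)
	}

	// The returned Id has the form projectId:datasetId, which is what the
	// resource stores via d.SetId and later splits in
	// resourceBigQueryDatasetParseID.
	fmt.Println("created dataset:", res.Id)
}
```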
From 9bd50a12191ee61a8568bac4ab5b6f700495fafe Mon Sep 17 00:00:00 2001 From: Alexander Date: Thu, 20 Apr 2017 18:47:38 +0200 Subject: [PATCH 234/342] provider/google: BigQuery Dataset (#13436) * Vendor BigQuery * Add resource * Add tests * Add documentation * Remove named import * Remove `retain_on_delete` * Fix formatting --- builtin/providers/google/config.go | 9 + .../google/import_bigquery_dataset_test.go | 31 + builtin/providers/google/provider.go | 1 + .../google/resource_bigquery_dataset.go | 285 + .../google/resource_bigquery_dataset_test.go | 112 + .../api/bigquery/v2/bigquery-api.json | 2787 +++++++ .../api/bigquery/v2/bigquery-gen.go | 6690 +++++++++++++++++ vendor/vendor.json | 6 + .../google/r/bigquery_dataset.html.markdown | 80 + website/source/layouts/google.erb | 9 + 10 files changed, 10010 insertions(+) create mode 100644 builtin/providers/google/import_bigquery_dataset_test.go create mode 100644 builtin/providers/google/resource_bigquery_dataset.go create mode 100644 builtin/providers/google/resource_bigquery_dataset_test.go create mode 100644 vendor/google.golang.org/api/bigquery/v2/bigquery-api.json create mode 100644 vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go create mode 100644 website/source/docs/providers/google/r/bigquery_dataset.html.markdown diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go index 37ac2db85..9ce20633d 100644 --- a/builtin/providers/google/config.go +++ b/builtin/providers/google/config.go @@ -13,6 +13,7 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" + "google.golang.org/api/bigquery/v2" "google.golang.org/api/cloudbilling/v1" "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/compute/v1" @@ -42,6 +43,7 @@ type Config struct { clientSqlAdmin *sqladmin.Service clientIAM *iam.Service clientServiceMan *servicemanagement.APIService + clientBigQuery *bigquery.Service } func (c *Config) loadAndValidate() error { @@ -169,6 +171,13 @@ func (c *Config) loadAndValidate() error { } c.clientBilling.UserAgent = userAgent + log.Printf("[INFO] Instantiating Google Cloud BigQuery Client...") + c.clientBigQuery, err = bigquery.New(client) + if err != nil { + return err + } + c.clientBigQuery.UserAgent = userAgent + return nil } diff --git a/builtin/providers/google/import_bigquery_dataset_test.go b/builtin/providers/google/import_bigquery_dataset_test.go new file mode 100644 index 000000000..32f2682d4 --- /dev/null +++ b/builtin/providers/google/import_bigquery_dataset_test.go @@ -0,0 +1,31 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccBigQueryDataset_importBasic(t *testing.T) { + resourceName := "google_bigquery_dataset.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigQueryDatasetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + }, + + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index f302e00ca..ce33f7e45 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -55,6 +55,7 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: 
map[string]*schema.Resource{ + "google_bigquery_dataset": resourceBigQueryDataset(), "google_compute_autoscaler": resourceComputeAutoscaler(), "google_compute_address": resourceComputeAddress(), "google_compute_backend_service": resourceComputeBackendService(), diff --git a/builtin/providers/google/resource_bigquery_dataset.go b/builtin/providers/google/resource_bigquery_dataset.go new file mode 100644 index 000000000..69cfdbb4c --- /dev/null +++ b/builtin/providers/google/resource_bigquery_dataset.go @@ -0,0 +1,285 @@ +package google + +import ( + "fmt" + "log" + "regexp" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/bigquery/v2" + "google.golang.org/api/googleapi" +) + +func resourceBigQueryDataset() *schema.Resource { + return &schema.Resource{ + Create: resourceBigQueryDatasetCreate, + Read: resourceBigQueryDatasetRead, + Update: resourceBigQueryDatasetUpdate, + Delete: resourceBigQueryDatasetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + // DatasetId: [Required] A unique ID for this dataset, without the + // project name. The ID must contain only letters (a-z, A-Z), numbers + // (0-9), or underscores (_). The maximum length is 1,024 characters. + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_)", k)) + } + + if len(value) > 1024 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 1,024 characters", k)) + } + + return + }, + }, + + // ProjectId: [Optional] The ID of the project containing this dataset. + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + // FriendlyName: [Optional] A descriptive name for the dataset. + "friendly_name": { + Type: schema.TypeString, + Optional: true, + }, + + // Description: [Optional] A user-friendly description of the dataset. + "description": { + Type: schema.TypeString, + Optional: true, + }, + + // Location: [Experimental] The geographic location where the dataset + // should reside. Possible values include EU and US. The default value + // is US. + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "US", + ValidateFunc: validation.StringInSlice([]string{"US", "EU"}, false), + }, + + // DefaultTableExpirationMs: [Optional] The default lifetime of all + // tables in the dataset, in milliseconds. The minimum value is 3600000 + // milliseconds (one hour). Once this property is set, all newly-created + // tables in the dataset will have an expirationTime property set to the + // creation time plus the value in this property, and changing the value + // will only affect new tables, not existing ones. When the + // expirationTime for a given table is reached, that table will be + // deleted automatically. If a table's expirationTime is modified or + // removed before the table expires, or if you provide an explicit + // expirationTime when creating a table, that value takes precedence + // over the default expiration time indicated by this property. 
+ "default_table_expiration_ms": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 3600000 { + errors = append(errors, fmt.Errorf("%q cannot be shorter than 3600000 milliseconds (one hour)", k)) + } + + return + }, + }, + + // Labels: [Experimental] The labels associated with this dataset. You + // can use these to organize and group your datasets. You can set this + // property when inserting or updating a dataset. + "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + }, + + // SelfLink: [Output-only] A URL that can be used to access the resource + // again. You can use this URL in Get or Update requests to the + // resource. + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + // Etag: [Output-only] A hash of the resource. + "etag": { + Type: schema.TypeString, + Computed: true, + }, + + // CreationTime: [Output-only] The time when this dataset was created, + // in milliseconds since the epoch. + "creation_time": { + Type: schema.TypeInt, + Computed: true, + }, + + // LastModifiedTime: [Output-only] The date when this dataset or any of + // its tables was last modified, in milliseconds since the epoch. + "last_modified_time": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func resourceDataset(d *schema.ResourceData, meta interface{}) (*bigquery.Dataset, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + dataset := &bigquery.Dataset{ + DatasetReference: &bigquery.DatasetReference{ + DatasetId: d.Get("dataset_id").(string), + ProjectId: project, + }, + } + + if v, ok := d.GetOk("friendly_name"); ok { + dataset.FriendlyName = v.(string) + } + + if v, ok := d.GetOk("description"); ok { + dataset.Description = v.(string) + } + + if v, ok := d.GetOk("location"); ok { + dataset.Location = v.(string) + } + + if v, ok := d.GetOk("default_table_expiration_ms"); ok { + dataset.DefaultTableExpirationMs = int64(v.(int)) + } + + if v, ok := d.GetOk("labels"); ok { + labels := map[string]string{} + + for k, v := range v.(map[string]interface{}) { + labels[k] = v.(string) + } + + dataset.Labels = labels + } + + return dataset, nil +} + +func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + dataset, err := resourceDataset(d, meta) + if err != nil { + return err + } + + log.Printf("[INFO] Creating BigQuery dataset: %s", dataset.DatasetReference.DatasetId) + + res, err := config.clientBigQuery.Datasets.Insert(project, dataset).Do() + if err != nil { + return err + } + + log.Printf("[INFO] BigQuery dataset %s has been created", res.Id) + + d.SetId(res.Id) + + return resourceBigQueryDatasetRead(d, meta) +} + +func resourceBigQueryDatasetParseID(id string) (string, string) { + // projectID, datasetID + parts := strings.Split(id, ":") + return parts[0], parts[1] +} + +func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Reading BigQuery dataset: %s", d.Id()) + + projectID, datasetID := resourceBigQueryDatasetParseID(d.Id()) + + res, err := config.clientBigQuery.Datasets.Get(projectID, datasetID).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing BigQuery dataset %q because it's 
gone", datasetID) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return err + } + + d.Set("etag", res.Etag) + d.Set("labels", res.Labels) + d.Set("location", res.Location) + d.Set("self_link", res.SelfLink) + d.Set("description", res.Description) + d.Set("friendly_name", res.FriendlyName) + d.Set("creation_time", res.CreationTime) + d.Set("last_modified_time", res.LastModifiedTime) + d.Set("dataset_id", res.DatasetReference.DatasetId) + d.Set("default_table_expiration_ms", res.DefaultTableExpirationMs) + + return nil +} + +func resourceBigQueryDatasetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + dataset, err := resourceDataset(d, meta) + if err != nil { + return err + } + + log.Printf("[INFO] Updating BigQuery dataset: %s", d.Id()) + + projectID, datasetID := resourceBigQueryDatasetParseID(d.Id()) + + if _, err = config.clientBigQuery.Datasets.Update(projectID, datasetID, dataset).Do(); err != nil { + return err + } + + return resourceBigQueryDatasetRead(d, meta) +} + +func resourceBigQueryDatasetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Deleting BigQuery dataset: %s", d.Id()) + + projectID, datasetID := resourceBigQueryDatasetParseID(d.Id()) + + if err := config.clientBigQuery.Datasets.Delete(projectID, datasetID).Do(); err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/google/resource_bigquery_dataset_test.go b/builtin/providers/google/resource_bigquery_dataset_test.go new file mode 100644 index 000000000..e1032ce91 --- /dev/null +++ b/builtin/providers/google/resource_bigquery_dataset_test.go @@ -0,0 +1,112 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccBigQueryDataset_basic(t *testing.T) { + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigQueryDatasetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + Check: resource.ComposeTestCheckFunc( + testAccCheckBigQueryDatasetExists( + "google_bigquery_dataset.test"), + ), + }, + + { + Config: testAccBigQueryDatasetUpdated(datasetID), + Check: resource.ComposeTestCheckFunc( + testAccCheckBigQueryDatasetExists( + "google_bigquery_dataset.test"), + ), + }, + }, + }) +} + +func testAccCheckBigQueryDatasetDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_bigquery_dataset" { + continue + } + + _, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do() + if err == nil { + return fmt.Errorf("Dataset still exists") + } + } + + return nil +} + +func testAccCheckBigQueryDatasetExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do() + if err != nil { + return err + } + + if found.Id != rs.Primary.ID { + return fmt.Errorf("Dataset 
not found") + } + + return nil + } +} + +func testAccBigQueryDataset(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + friendly_name = "foo" + description = "This is a foo description" + location = "EU" + default_table_expiration_ms = 3600000 + + labels { + env = "foo" + default_table_expiration_ms = 3600000 + } +}`, datasetID) +} + +func testAccBigQueryDatasetUpdated(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + friendly_name = "bar" + description = "This is a bar description" + location = "EU" + default_table_expiration_ms = 7200000 + + labels { + env = "bar" + default_table_expiration_ms = 7200000 + } +}`, datasetID) +} diff --git a/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json b/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json new file mode 100644 index 000000000..93a4ec3d6 --- /dev/null +++ b/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json @@ -0,0 +1,2787 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/2pbHVnKgRBtlI769YwDOp1uiQ0w\"", + "discoveryVersion": "v1", + "id": "bigquery:v2", + "name": "bigquery", + "version": "v2", + "revision": "20170224", + "title": "BigQuery API", + "description": "A data platform for customers to create, manage, share and query data.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "https://www.google.com/images/icons/product/search-16.gif", + "x32": "https://www.google.com/images/icons/product/search-32.gif" + }, + "documentationLink": "https://cloud.google.com/bigquery/", + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/bigquery/v2/", + "basePath": "/bigquery/v2/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "bigquery/v2/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/bigquery": { + "description": "View and manage your data in Google BigQuery" + }, + "https://www.googleapis.com/auth/bigquery.insertdata": { + "description": "Insert data into Google BigQuery" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/devstorage.full_control": { + "description": "Manage your data and permissions in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_only": { + "description": "View your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + } + } + } + }, + "schemas": { + "BigtableColumn": { + "id": "BigtableColumn", + "type": "object", + "properties": { + "encoding": { + "type": "string", + "description": "[Optional] The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels." + }, + "fieldName": { + "type": "string", + "description": "[Optional] If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries." + }, + "onlyReadLatest": { + "type": "boolean", + "description": "[Optional] If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels." + }, + "qualifierEncoded": { + "type": "string", + "description": "[Required] Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifier_string field. Otherwise, a base-64 encoded value must be set to qualifier_encoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as field_name.", + "format": "byte" + }, + "qualifierString": { + "type": "string" + }, + "type": { + "type": "string", + "description": "[Optional] The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is BYTES. 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels." 
+ } + } + }, + "BigtableColumnFamily": { + "id": "BigtableColumnFamily", + "type": "object", + "properties": { + "columns": { + "type": "array", + "description": "[Optional] Lists of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as .. Other columns can be accessed as a list through .Column field.", + "items": { + "$ref": "BigtableColumn" + } + }, + "encoding": { + "type": "string", + "description": "[Optional] The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it." + }, + "familyId": { + "type": "string", + "description": "Identifier of the column family." + }, + "onlyReadLatest": { + "type": "boolean", + "description": "[Optional] If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column." + }, + "type": { + "type": "string", + "description": "[Optional] The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it." + } + } + }, + "BigtableOptions": { + "id": "BigtableOptions", + "type": "object", + "properties": { + "columnFamilies": { + "type": "array", + "description": "[Optional] List of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable.", + "items": { + "$ref": "BigtableColumnFamily" + } + }, + "ignoreUnspecifiedColumnFamilies": { + "type": "boolean", + "description": "[Optional] If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false." + }, + "readRowkeyAsString": { + "type": "boolean", + "description": "[Optional] If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false." + } + } + }, + "CsvOptions": { + "id": "CsvOptions", + "type": "object", + "properties": { + "allowJaggedRows": { + "type": "boolean", + "description": "[Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. 
The default value is false." + }, + "allowQuotedNewlines": { + "type": "boolean", + "description": "[Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false." + }, + "encoding": { + "type": "string", + "description": "[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties." + }, + "fieldDelimiter": { + "type": "string", + "description": "[Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',')." + }, + "quote": { + "type": "string", + "description": "[Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.", + "default": "\"", + "pattern": ".?" + }, + "skipLeadingRows": { + "type": "string", + "description": "[Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.", + "format": "int64" + } + } + }, + "Dataset": { + "id": "Dataset", + "type": "object", + "properties": { + "access": { + "type": "array", + "description": "[Optional] An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER;", + "items": { + "type": "object", + "properties": { + "domain": { + "type": "string", + "description": "[Pick one] A domain to grant access to. Any users signed in with the domain specified will be granted the specified access. Example: \"example.com\"." + }, + "groupByEmail": { + "type": "string", + "description": "[Pick one] An email address of a Google Group to grant access to." + }, + "role": { + "type": "string", + "description": "[Required] Describes the rights granted to the user specified by the other member of the access object. The following string values are supported: READER, WRITER, OWNER." + }, + "specialGroup": { + "type": "string", + "description": "[Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. allAuthenticatedUsers: All authenticated BigQuery users." + }, + "userByEmail": { + "type": "string", + "description": "[Pick one] An email address of a user to grant access to. 
For example: fred@example.com." + }, + "view": { + "$ref": "TableReference", + "description": "[Pick one] A view from a different dataset to grant access to. Queries executed against that view will have read access to tables in this dataset. The role field is not required when this field is set. If that view is updated by any user, access to the view needs to be granted again via an update operation." + } + } + } + }, + "creationTime": { + "type": "string", + "description": "[Output-only] The time when this dataset was created, in milliseconds since the epoch.", + "format": "int64" + }, + "datasetReference": { + "$ref": "DatasetReference", + "description": "[Required] A reference that identifies the dataset." + }, + "defaultTableExpirationMs": { + "type": "string", + "description": "[Optional] The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one hour). Once this property is set, all newly-created tables in the dataset will have an expirationTime property set to the creation time plus the value in this property, and changing the value will only affect new tables, not existing ones. When the expirationTime for a given table is reached, that table will be deleted automatically. If a table's expirationTime is modified or removed before the table expires, or if you provide an explicit expirationTime when creating a table, that value takes precedence over the default expiration time indicated by this property.", + "format": "int64" + }, + "description": { + "type": "string", + "description": "[Optional] A user-friendly description of the dataset." + }, + "etag": { + "type": "string", + "description": "[Output-only] A hash of the resource." + }, + "friendlyName": { + "type": "string", + "description": "[Optional] A descriptive name for the dataset." + }, + "id": { + "type": "string", + "description": "[Output-only] The fully-qualified unique name of the dataset in the format projectId:datasetId. The dataset name without the project name is given in the datasetId field. When creating a new dataset, leave this field blank, and instead specify the datasetId field." + }, + "kind": { + "type": "string", + "description": "[Output-only] The resource type.", + "default": "bigquery#dataset" + }, + "labels": { + "type": "object", + "description": "[Experimental] The labels associated with this dataset. You can use these to organize and group your datasets. You can set this property when inserting or updating a dataset. See Labeling Datasets for more information.", + "additionalProperties": { + "type": "string" + } + }, + "lastModifiedTime": { + "type": "string", + "description": "[Output-only] The date when this dataset or any of its tables was last modified, in milliseconds since the epoch.", + "format": "int64" + }, + "location": { + "type": "string", + "description": "[Experimental] The geographic location where the dataset should reside. Possible values include EU and US. The default value is US." + }, + "selfLink": { + "type": "string", + "description": "[Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource." + } + } + }, + "DatasetList": { + "id": "DatasetList", + "type": "object", + "properties": { + "datasets": { + "type": "array", + "description": "An array of the dataset resources in the project. Each resource contains basic information. For full information about a particular dataset resource, use the Datasets: get method. 
This property is omitted when there are no datasets in the project.", + "items": { + "type": "object", + "properties": { + "datasetReference": { + "$ref": "DatasetReference", + "description": "The dataset reference. Use this property to access specific parts of the dataset's ID, such as project ID or dataset ID." + }, + "friendlyName": { + "type": "string", + "description": "A descriptive name for the dataset, if one exists." + }, + "id": { + "type": "string", + "description": "The fully-qualified, unique, opaque ID of the dataset." + }, + "kind": { + "type": "string", + "description": "The resource type. This property always returns the value \"bigquery#dataset\".", + "default": "bigquery#dataset" + }, + "labels": { + "type": "object", + "description": "[Experimental] The labels associated with this dataset. You can use these to organize and group your datasets.", + "additionalProperties": { + "type": "string" + } + } + } + } + }, + "etag": { + "type": "string", + "description": "A hash value of the results page. You can use this property to determine if the page has changed since the last request." + }, + "kind": { + "type": "string", + "description": "The list type. This property always returns the value \"bigquery#datasetList\".", + "default": "bigquery#datasetList" + }, + "nextPageToken": { + "type": "string", + "description": "A token that can be used to request the next results page. This property is omitted on the final results page." + } + } + }, + "DatasetReference": { + "id": "DatasetReference", + "type": "object", + "properties": { + "datasetId": { + "type": "string", + "description": "[Required] A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.", + "annotations": { + "required": [ + "bigquery.datasets.update" + ] + } + }, + "projectId": { + "type": "string", + "description": "[Optional] The ID of the project containing this dataset.", + "annotations": { + "required": [ + "bigquery.datasets.update" + ] + } + } + } + }, + "ErrorProto": { + "id": "ErrorProto", + "type": "object", + "properties": { + "debugInfo": { + "type": "string", + "description": "Debugging information. This property is internal to Google and should not be used." + }, + "location": { + "type": "string", + "description": "Specifies where the error occurred, if present." + }, + "message": { + "type": "string", + "description": "A human-readable description of the error." + }, + "reason": { + "type": "string", + "description": "A short error code that summarizes the error." + } + } + }, + "ExplainQueryStage": { + "id": "ExplainQueryStage", + "type": "object", + "properties": { + "computeRatioAvg": { + "type": "number", + "description": "Relative amount of time the average shard spent on CPU-bound tasks.", + "format": "double" + }, + "computeRatioMax": { + "type": "number", + "description": "Relative amount of time the slowest shard spent on CPU-bound tasks.", + "format": "double" + }, + "id": { + "type": "string", + "description": "Unique ID for stage within plan.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "Human-readable name for stage." 
+ }, + "readRatioAvg": { + "type": "number", + "description": "Relative amount of time the average shard spent reading input.", + "format": "double" + }, + "readRatioMax": { + "type": "number", + "description": "Relative amount of time the slowest shard spent reading input.", + "format": "double" + }, + "recordsRead": { + "type": "string", + "description": "Number of records read into the stage.", + "format": "int64" + }, + "recordsWritten": { + "type": "string", + "description": "Number of records written by the stage.", + "format": "int64" + }, + "status": { + "type": "string", + "description": "Current status for the stage." + }, + "steps": { + "type": "array", + "description": "List of operations within the stage in dependency order (approximately chronological).", + "items": { + "$ref": "ExplainQueryStep" + } + }, + "waitRatioAvg": { + "type": "number", + "description": "Relative amount of time the average shard spent waiting to be scheduled.", + "format": "double" + }, + "waitRatioMax": { + "type": "number", + "description": "Relative amount of time the slowest shard spent waiting to be scheduled.", + "format": "double" + }, + "writeRatioAvg": { + "type": "number", + "description": "Relative amount of time the average shard spent on writing output.", + "format": "double" + }, + "writeRatioMax": { + "type": "number", + "description": "Relative amount of time the slowest shard spent on writing output.", + "format": "double" + } + } + }, + "ExplainQueryStep": { + "id": "ExplainQueryStep", + "type": "object", + "properties": { + "kind": { + "type": "string", + "description": "Machine-readable operation type." + }, + "substeps": { + "type": "array", + "description": "Human-readable stage descriptions.", + "items": { + "type": "string" + } + } + } + }, + "ExternalDataConfiguration": { + "id": "ExternalDataConfiguration", + "type": "object", + "properties": { + "autodetect": { + "type": "boolean", + "description": "[Experimental] Try to detect schema and format options automatically. Any option specified explicitly will be honored." + }, + "bigtableOptions": { + "$ref": "BigtableOptions", + "description": "[Optional] Additional options if sourceFormat is set to BIGTABLE." + }, + "compression": { + "type": "string", + "description": "[Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats." + }, + "csvOptions": { + "$ref": "CsvOptions", + "description": "Additional properties to set if sourceFormat is set to CSV." + }, + "googleSheetsOptions": { + "$ref": "GoogleSheetsOptions", + "description": "[Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS." + }, + "ignoreUnknownValues": { + "type": "boolean", + "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored." 
+ }, + "maxBadRecords": { + "type": "integer", + "description": "[Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.", + "format": "int32" + }, + "schema": { + "$ref": "TableSchema", + "description": "[Optional] The schema for the data. Schema is required for CSV and JSON formats. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, and Avro formats." + }, + "sourceFormat": { + "type": "string", + "description": "[Required] The data format. For CSV files, specify \"CSV\". For Google sheets, specify \"GOOGLE_SHEETS\". For newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\". For Avro files, specify \"AVRO\". For Google Cloud Datastore backups, specify \"DATASTORE_BACKUP\". [Experimental] For Google Cloud Bigtable, specify \"BIGTABLE\". Please note that reading from Google Cloud Bigtable is experimental and has to be enabled for your project. Please contact Google Cloud Support to enable this for your project." + }, + "sourceUris": { + "type": "array", + "description": "[Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified, and it must end with '.backup_info'. Also, the '*' wildcard character is not allowed.", + "items": { + "type": "string" + } + } + } + }, + "GetQueryResultsResponse": { + "id": "GetQueryResultsResponse", + "type": "object", + "properties": { + "cacheHit": { + "type": "boolean", + "description": "Whether the query result was fetched from the query cache." + }, + "errors": { + "type": "array", + "description": "[Output-only] All errors and warnings encountered during the running of the job. Errors here do not necessarily mean that the job has completed or was unsuccessful.", + "items": { + "$ref": "ErrorProto" + } + }, + "etag": { + "type": "string", + "description": "A hash of this response." + }, + "jobComplete": { + "type": "boolean", + "description": "Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available." + }, + "jobReference": { + "$ref": "JobReference", + "description": "Reference to the BigQuery Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults)." + }, + "kind": { + "type": "string", + "description": "The resource type of the response.", + "default": "bigquery#getQueryResultsResponse" + }, + "numDmlAffectedRows": { + "type": "string", + "description": "[Output-only, Experimental] The number of rows affected by a DML statement. 
Present only for DML statements INSERT, UPDATE or DELETE.", + "format": "int64" + }, + "pageToken": { + "type": "string", + "description": "A token used for paging results." + }, + "rows": { + "type": "array", + "description": "An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above. Present only when the query completes successfully.", + "items": { + "$ref": "TableRow" + } + }, + "schema": { + "$ref": "TableSchema", + "description": "The schema of the results. Present only when the query completes successfully." + }, + "totalBytesProcessed": { + "type": "string", + "description": "The total number of bytes processed for this query.", + "format": "int64" + }, + "totalRows": { + "type": "string", + "description": "The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results. Present only when the query completes successfully.", + "format": "uint64" + } + } + }, + "GoogleSheetsOptions": { + "id": "GoogleSheetsOptions", + "type": "object", + "properties": { + "skipLeadingRows": { + "type": "string", + "description": "[Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N \u003e 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.", + "format": "int64" + } + } + }, + "Job": { + "id": "Job", + "type": "object", + "properties": { + "configuration": { + "$ref": "JobConfiguration", + "description": "[Required] Describes the job configuration." + }, + "etag": { + "type": "string", + "description": "[Output-only] A hash of this resource." + }, + "id": { + "type": "string", + "description": "[Output-only] Opaque ID field of the job" + }, + "jobReference": { + "$ref": "JobReference", + "description": "[Optional] Reference describing the unique-per-user name of the job." + }, + "kind": { + "type": "string", + "description": "[Output-only] The type of the resource.", + "default": "bigquery#job" + }, + "selfLink": { + "type": "string", + "description": "[Output-only] A URL that can be used to access this resource again." + }, + "statistics": { + "$ref": "JobStatistics", + "description": "[Output-only] Information about the job, including starting time and ending time of the job." + }, + "status": { + "$ref": "JobStatus", + "description": "[Output-only] The status of this job. Examine this value when polling an asynchronous job to see if the job is complete." + }, + "user_email": { + "type": "string", + "description": "[Output-only] Email address of the user who ran the job." + } + } + }, + "JobCancelResponse": { + "id": "JobCancelResponse", + "type": "object", + "properties": { + "job": { + "$ref": "Job", + "description": "The final state of the job." 
+ }, + "kind": { + "type": "string", + "description": "The resource type of the response.", + "default": "bigquery#jobCancelResponse" + } + } + }, + "JobConfiguration": { + "id": "JobConfiguration", + "type": "object", + "properties": { + "copy": { + "$ref": "JobConfigurationTableCopy", + "description": "[Pick one] Copies a table." + }, + "dryRun": { + "type": "boolean", + "description": "[Optional] If set, don't actually run this job. A valid query will return a mostly empty response with some processing statistics, while an invalid query will return the same error it would if it wasn't a dry run. Behavior of non-query jobs is undefined." + }, + "extract": { + "$ref": "JobConfigurationExtract", + "description": "[Pick one] Configures an extract job." + }, + "labels": { + "type": "object", + "description": "[Experimental] The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.", + "additionalProperties": { + "type": "string" + } + }, + "load": { + "$ref": "JobConfigurationLoad", + "description": "[Pick one] Configures a load job." + }, + "query": { + "$ref": "JobConfigurationQuery", + "description": "[Pick one] Configures a query job." + } + } + }, + "JobConfigurationExtract": { + "id": "JobConfigurationExtract", + "type": "object", + "properties": { + "compression": { + "type": "string", + "description": "[Optional] The compression type to use for exported files. Possible values include GZIP and NONE. The default value is NONE." + }, + "destinationFormat": { + "type": "string", + "description": "[Optional] The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV. Tables with nested or repeated fields cannot be exported as CSV." + }, + "destinationUri": { + "type": "string", + "description": "[Pick one] DEPRECATED: Use destinationUris instead, passing only one URI as necessary. The fully-qualified Google Cloud Storage URI where the extracted table should be written." + }, + "destinationUris": { + "type": "array", + "description": "[Pick one] A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.", + "items": { + "type": "string" + } + }, + "fieldDelimiter": { + "type": "string", + "description": "[Optional] Delimiter to use between fields in the exported data. Default is ','" + }, + "printHeader": { + "type": "boolean", + "description": "[Optional] Whether to print out a header row in the results. Default is true.", + "default": "true" + }, + "sourceTable": { + "$ref": "TableReference", + "description": "[Required] A reference to the table being exported." + } + } + }, + "JobConfigurationLoad": { + "id": "JobConfigurationLoad", + "type": "object", + "properties": { + "allowJaggedRows": { + "type": "boolean", + "description": "[Optional] Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats." 
+ }, + "allowQuotedNewlines": { + "type": "boolean", + "description": "Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false." + }, + "autodetect": { + "type": "boolean", + "description": "[Experimental] Indicates if we should automatically infer the options and schema for CSV and JSON sources." + }, + "createDisposition": { + "type": "string", + "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion." + }, + "destinationTable": { + "$ref": "TableReference", + "description": "[Required] The destination table to load the data into." + }, + "encoding": { + "type": "string", + "description": "[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties." + }, + "fieldDelimiter": { + "type": "string", + "description": "[Optional] The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',')." + }, + "ignoreUnknownValues": { + "type": "boolean", + "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names" + }, + "maxBadRecords": { + "type": "integer", + "description": "[Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.", + "format": "int32" + }, + "nullMarker": { + "type": "string", + "description": "[Optional] Specifies a string that represents a null value in a CSV file. For example, if you specify \"\\N\", BigQuery interprets \"\\N\" as a null value when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery still interprets the empty string as a null value for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value." + }, + "projectionFields": { + "type": "array", + "description": "[Experimental] If sourceFormat is set to \"DATASTORE_BACKUP\", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. 
If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.", + "items": { + "type": "string" + } + }, + "quote": { + "type": "string", + "description": "[Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.", + "default": "\"", + "pattern": ".?" + }, + "schema": { + "$ref": "TableSchema", + "description": "[Optional] The schema for the destination table. The schema can be omitted if the destination table already exists, or if you're loading data from Google Cloud Datastore." + }, + "schemaInline": { + "type": "string", + "description": "[Deprecated] The inline schema. For CSV schemas, specify as \"Field1:Type1[,Field2:Type2]*\". For example, \"foo:STRING, bar:INTEGER, baz:FLOAT\"." + }, + "schemaInlineFormat": { + "type": "string", + "description": "[Deprecated] The format of the schemaInline property." + }, + "schemaUpdateOptions": { + "type": "array", + "description": "[Experimental] Allows the schema of the destination table to be updated as a side effect of the load job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.", + "items": { + "type": "string" + } + }, + "skipLeadingRows": { + "type": "integer", + "description": "[Optional] The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.", + "format": "int32" + }, + "sourceFormat": { + "type": "string", + "description": "[Optional] The format of the data files. For CSV files, specify \"CSV\". For datastore backups, specify \"DATASTORE_BACKUP\". For newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\". For Avro, specify \"AVRO\". The default value is CSV." + }, + "sourceUris": { + "type": "array", + "description": "[Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name.", + "items": { + "type": "string" + } + }, + "writeDisposition": { + "type": "string", + "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_APPEND. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. 
Creation, truncation and append actions occur as one atomic update upon job completion." + } + } + }, + "JobConfigurationQuery": { + "id": "JobConfigurationQuery", + "type": "object", + "properties": { + "allowLargeResults": { + "type": "boolean", + "description": "If true, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set." + }, + "createDisposition": { + "type": "string", + "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion." + }, + "defaultDataset": { + "$ref": "DatasetReference", + "description": "[Optional] Specifies the default dataset to use for unqualified table names in the query." + }, + "destinationTable": { + "$ref": "TableReference", + "description": "[Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results." + }, + "flattenResults": { + "type": "boolean", + "description": "[Optional] Flattens all nested and repeated fields in the query results. The default value is true. allowLargeResults must be true if this is set to false.", + "default": "true" + }, + "maximumBillingTier": { + "type": "integer", + "description": "[Optional] Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.", + "default": "1", + "format": "int32" + }, + "maximumBytesBilled": { + "type": "string", + "description": "[Optional] Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.", + "format": "int64" + }, + "parameterMode": { + "type": "string", + "description": "[Experimental] Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query." + }, + "preserveNulls": { + "type": "boolean", + "description": "[Deprecated] This property is deprecated." + }, + "priority": { + "type": "string", + "description": "[Optional] Specifies a priority for the query. Possible values include INTERACTIVE and BATCH. The default value is INTERACTIVE." + }, + "query": { + "type": "string", + "description": "[Required] BigQuery SQL query to execute." + }, + "queryParameters": { + "type": "array", + "description": "Query parameters for standard SQL queries.", + "items": { + "$ref": "QueryParameter" + } + }, + "schemaUpdateOptions": { + "type": "array", + "description": "[Experimental] Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. 
ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.", + "items": { + "type": "string" + } + }, + "tableDefinitions": { + "type": "object", + "description": "[Optional] If querying an external data source outside of BigQuery, describes the data format, location and other properties of the data source. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.", + "additionalProperties": { + "$ref": "ExternalDataConfiguration" + } + }, + "useLegacySql": { + "type": "boolean", + "description": "Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is set to false, the values of allowLargeResults and flattenResults are ignored; query will be run as if allowLargeResults is true and flattenResults is false." + }, + "useQueryCache": { + "type": "boolean", + "description": "[Optional] Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.", + "default": "true" + }, + "userDefinedFunctionResources": { + "type": "array", + "description": "[Experimental] Describes user-defined function resources used in the query.", + "items": { + "$ref": "UserDefinedFunctionResource" + } + }, + "writeDisposition": { + "type": "string", + "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion." + } + } + }, + "JobConfigurationTableCopy": { + "id": "JobConfigurationTableCopy", + "type": "object", + "properties": { + "createDisposition": { + "type": "string", + "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion." + }, + "destinationTable": { + "$ref": "TableReference", + "description": "[Required] The destination table" + }, + "sourceTable": { + "$ref": "TableReference", + "description": "[Pick one] Source table to copy." + }, + "sourceTables": { + "type": "array", + "description": "[Pick one] Source tables to copy.", + "items": { + "$ref": "TableReference" + } + }, + "writeDisposition": { + "type": "string", + "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. 
WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion." + } + } + }, + "JobList": { + "id": "JobList", + "type": "object", + "properties": { + "etag": { + "type": "string", + "description": "A hash of this page of results." + }, + "jobs": { + "type": "array", + "description": "List of jobs that were requested.", + "items": { + "type": "object", + "properties": { + "configuration": { + "$ref": "JobConfiguration", + "description": "[Full-projection-only] Specifies the job configuration." + }, + "errorResult": { + "$ref": "ErrorProto", + "description": "A result object that will be present only if the job has failed." + }, + "id": { + "type": "string", + "description": "Unique opaque ID of the job." + }, + "jobReference": { + "$ref": "JobReference", + "description": "Job reference uniquely identifying the job." + }, + "kind": { + "type": "string", + "description": "The resource type.", + "default": "bigquery#job" + }, + "state": { + "type": "string", + "description": "Running state of the job. When the state is DONE, errorResult can be checked to determine whether the job succeeded or failed." + }, + "statistics": { + "$ref": "JobStatistics", + "description": "[Output-only] Information about the job, including starting time and ending time of the job." + }, + "status": { + "$ref": "JobStatus", + "description": "[Full-projection-only] Describes the state of the job." + }, + "user_email": { + "type": "string", + "description": "[Full-projection-only] Email address of the user who ran the job." + } + } + } + }, + "kind": { + "type": "string", + "description": "The resource type of the response.", + "default": "bigquery#jobList" + }, + "nextPageToken": { + "type": "string", + "description": "A token to request the next page of results." + } + } + }, + "JobReference": { + "id": "JobReference", + "type": "object", + "properties": { + "jobId": { + "type": "string", + "description": "[Required] The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.", + "annotations": { + "required": [ + "bigquery.jobs.getQueryResults" + ] + } + }, + "projectId": { + "type": "string", + "description": "[Required] The ID of the project containing this job.", + "annotations": { + "required": [ + "bigquery.jobs.getQueryResults" + ] + } + } + } + }, + "JobStatistics": { + "id": "JobStatistics", + "type": "object", + "properties": { + "creationTime": { + "type": "string", + "description": "[Output-only] Creation time of this job, in milliseconds since the epoch. This field will be present on all jobs.", + "format": "int64" + }, + "endTime": { + "type": "string", + "description": "[Output-only] End time of this job, in milliseconds since the epoch. This field will be present whenever a job is in the DONE state.", + "format": "int64" + }, + "extract": { + "$ref": "JobStatistics4", + "description": "[Output-only] Statistics for an extract job." + }, + "load": { + "$ref": "JobStatistics3", + "description": "[Output-only] Statistics for a load job." + }, + "query": { + "$ref": "JobStatistics2", + "description": "[Output-only] Statistics for a query job." 
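The JobList schema above is the response shape of jobs.list (defined near the end of this file). A sketch, under the same client assumptions, of paging through it and printing each job's running state; nextPageToken is empty on the last page:

```
// listRunningJobs pages through jobs.list, whose response is the JobList
// schema above, restricted to running jobs with the minimal projection.
func listRunningJobs(svc *bigquery.Service, projectID string) error {
	pageToken := ""
	for {
		call := svc.Jobs.List(projectID).StateFilter("running").Projection("minimal")
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		for _, j := range page.Jobs {
			log.Printf("%s: %s", j.Id, j.State)
		}
		if page.NextPageToken == "" {
			return nil // no more pages
		}
		pageToken = page.NextPageToken
	}
}
```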
+ }, + "startTime": { + "type": "string", + "description": "[Output-only] Start time of this job, in milliseconds since the epoch. This field will be present when the job transitions from the PENDING state to either RUNNING or DONE.", + "format": "int64" + }, + "totalBytesProcessed": { + "type": "string", + "description": "[Output-only] [Deprecated] Use the bytes processed in the query statistics instead.", + "format": "int64" + } + } + }, + "JobStatistics2": { + "id": "JobStatistics2", + "type": "object", + "properties": { + "billingTier": { + "type": "integer", + "description": "[Output-only] Billing tier for the job.", + "format": "int32" + }, + "cacheHit": { + "type": "boolean", + "description": "[Output-only] Whether the query result was fetched from the query cache." + }, + "numDmlAffectedRows": { + "type": "string", + "description": "[Output-only, Experimental] The number of rows affected by a DML statement. Present only for DML statements INSERT, UPDATE or DELETE.", + "format": "int64" + }, + "queryPlan": { + "type": "array", + "description": "[Output-only, Experimental] Describes execution plan for the query.", + "items": { + "$ref": "ExplainQueryStage" + } + }, + "referencedTables": { + "type": "array", + "description": "[Output-only, Experimental] Referenced tables for the job. Queries that reference more than 50 tables will not have a complete list.", + "items": { + "$ref": "TableReference" + } + }, + "schema": { + "$ref": "TableSchema", + "description": "[Output-only, Experimental] The schema of the results. Present only for successful dry run of non-legacy SQL queries." + }, + "statementType": { + "type": "string", + "description": "[Output-only, Experimental] The type of query statement, if valid." + }, + "totalBytesBilled": { + "type": "string", + "description": "[Output-only] Total bytes billed for the job.", + "format": "int64" + }, + "totalBytesProcessed": { + "type": "string", + "description": "[Output-only] Total bytes processed for the job.", + "format": "int64" + }, + "undeclaredQueryParameters": { + "type": "array", + "description": "[Output-only, Experimental] Standard SQL only: list of undeclared query parameters detected during a dry run validation.", + "items": { + "$ref": "QueryParameter" + } + } + } + }, + "JobStatistics3": { + "id": "JobStatistics3", + "type": "object", + "properties": { + "inputFileBytes": { + "type": "string", + "description": "[Output-only] Number of bytes of source data in a load job.", + "format": "int64" + }, + "inputFiles": { + "type": "string", + "description": "[Output-only] Number of source files in a load job.", + "format": "int64" + }, + "outputBytes": { + "type": "string", + "description": "[Output-only] Size of the loaded data in bytes. Note that while a load job is in the running state, this value may change.", + "format": "int64" + }, + "outputRows": { + "type": "string", + "description": "[Output-only] Number of rows imported in a load job. Note that while an import job is in the running state, this value may change.", + "format": "int64" + } + } + }, + "JobStatistics4": { + "id": "JobStatistics4", + "type": "object", + "properties": { + "destinationUriFileCounts": { + "type": "array", + "description": "[Output-only] Number of files per destination URI or URI pattern specified in the extract configuration. 
These values will be in the same order as the URIs specified in the 'destinationUris' field.", + "items": { + "type": "string", + "format": "int64" + } + } + } + }, + "JobStatus": { + "id": "JobStatus", + "type": "object", + "properties": { + "errorResult": { + "$ref": "ErrorProto", + "description": "[Output-only] Final error result of the job. If present, indicates that the job has completed and was unsuccessful." + }, + "errors": { + "type": "array", + "description": "[Output-only] All errors encountered during the running of the job. Errors here do not necessarily mean that the job has completed or was unsuccessful.", + "items": { + "$ref": "ErrorProto" + } + }, + "state": { + "type": "string", + "description": "[Output-only] Running state of the job." + } + } + }, + "JsonObject": { + "id": "JsonObject", + "type": "object", + "description": "Represents a single JSON object.", + "additionalProperties": { + "$ref": "JsonValue" + } + }, + "JsonValue": { + "id": "JsonValue", + "type": "any" + }, + "ProjectList": { + "id": "ProjectList", + "type": "object", + "properties": { + "etag": { + "type": "string", + "description": "A hash of the page of results" + }, + "kind": { + "type": "string", + "description": "The type of list.", + "default": "bigquery#projectList" + }, + "nextPageToken": { + "type": "string", + "description": "A token to request the next page of results." + }, + "projects": { + "type": "array", + "description": "Projects to which you have at least READ access.", + "items": { + "type": "object", + "properties": { + "friendlyName": { + "type": "string", + "description": "A descriptive name for this project." + }, + "id": { + "type": "string", + "description": "An opaque ID of this project." + }, + "kind": { + "type": "string", + "description": "The resource type.", + "default": "bigquery#project" + }, + "numericId": { + "type": "string", + "description": "The numeric ID of this project.", + "format": "uint64" + }, + "projectReference": { + "$ref": "ProjectReference", + "description": "A unique reference to this project." + } + } + } + }, + "totalItems": { + "type": "integer", + "description": "The total number of projects in the list.", + "format": "int32" + } + } + }, + "ProjectReference": { + "id": "ProjectReference", + "type": "object", + "properties": { + "projectId": { + "type": "string", + "description": "[Required] ID of the project. Can be either the numeric ID or the assigned ID of the project." + } + } + }, + "QueryParameter": { + "id": "QueryParameter", + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "[Optional] If unset, this is a positional parameter. Otherwise, should be unique within a query." + }, + "parameterType": { + "$ref": "QueryParameterType", + "description": "[Required] The type of this parameter." + }, + "parameterValue": { + "$ref": "QueryParameterValue", + "description": "[Required] The value of this parameter." + } + } + }, + "QueryParameterType": { + "id": "QueryParameterType", + "type": "object", + "properties": { + "arrayType": { + "$ref": "QueryParameterType", + "description": "[Optional] The type of the array's elements, if this is an array." + }, + "structTypes": { + "type": "array", + "description": "[Optional] The types of the fields of this struct, in order, if this is a struct.", + "items": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "[Optional] Human-oriented description of the field." 
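JobStatus encodes the completion contract used throughout this API: a job is finished once state reaches DONE, and a non-nil errorResult at that point means it failed. A sketch of the usual polling loop against jobs.get, with a fixed sleep standing in for real backoff (fmt and time imports assumed):

```
// waitForJob polls jobs.get until JobStatus.State is DONE, then applies the
// errorResult convention above to distinguish success from failure.
func waitForJob(svc *bigquery.Service, projectID, jobID string) error {
	for {
		job, err := svc.Jobs.Get(projectID, jobID).Do()
		if err != nil {
			return err
		}
		if job.Status.State == "DONE" {
			if job.Status.ErrorResult != nil {
				return fmt.Errorf("job %s failed: %s", jobID, job.Status.ErrorResult.Message)
			}
			return nil // DONE with no errorResult means the job succeeded
		}
		time.Sleep(2 * time.Second) // crude fixed delay, for illustration only
	}
}
```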
+ }, + "name": { + "type": "string", + "description": "[Optional] The name of this field." + }, + "type": { + "$ref": "QueryParameterType", + "description": "[Required] The type of this field." + } + } + } + }, + "type": { + "type": "string", + "description": "[Required] The top level type of this field." + } + } + }, + "QueryParameterValue": { + "id": "QueryParameterValue", + "type": "object", + "properties": { + "arrayValues": { + "type": "array", + "description": "[Optional] The array values, if this is an array type.", + "items": { + "$ref": "QueryParameterValue" + } + }, + "structValues": { + "type": "object", + "description": "[Optional] The struct field values, in order of the struct type's declaration.", + "additionalProperties": { + "$ref": "QueryParameterValue" + } + }, + "value": { + "type": "string", + "description": "[Optional] The value of this value, if a simple scalar type." + } + } + }, + "QueryRequest": { + "id": "QueryRequest", + "type": "object", + "properties": { + "defaultDataset": { + "$ref": "DatasetReference", + "description": "[Optional] Specifies the default datasetId and projectId to assume for any unqualified table names in the query. If not set, all table names in the query string must be qualified in the format 'datasetId.tableId'." + }, + "dryRun": { + "type": "boolean", + "description": "[Optional] If set to true, BigQuery doesn't run the job. Instead, if the query is valid, BigQuery returns statistics about the job such as how many bytes would be processed. If the query is invalid, an error returns. The default value is false." + }, + "kind": { + "type": "string", + "description": "The resource type of the request.", + "default": "bigquery#queryRequest" + }, + "maxResults": { + "type": "integer", + "description": "[Optional] The maximum number of rows of data to return per page of results. Setting this flag to a small value such as 1000 and then paging through results might improve reliability when the query result set is large. In addition to this limit, responses are also limited to 10 MB. By default, there is no maximum row count, and only the byte limit applies.", + "format": "uint32" + }, + "parameterMode": { + "type": "string", + "description": "[Experimental] Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query." + }, + "preserveNulls": { + "type": "boolean", + "description": "[Deprecated] This property is deprecated." + }, + "query": { + "type": "string", + "description": "[Required] A query string, following the BigQuery query syntax, of the query to execute. Example: \"SELECT count(f1) FROM [myProjectId:myDatasetId.myTableId]\".", + "annotations": { + "required": [ + "bigquery.jobs.query" + ] + } + }, + "queryParameters": { + "type": "array", + "description": "[Experimental] Query parameters for Standard SQL queries.", + "items": { + "$ref": "QueryParameter" + } + }, + "timeoutMs": { + "type": "integer", + "description": "[Optional] How long to wait for the query to complete, in milliseconds, before the request times out and returns. Note that this is only a timeout for the request, not the query. If the query takes longer to run than the timeout value, the call returns without any results and with the 'jobComplete' flag set to false. You can call GetQueryResults() to wait for the query to complete and read the results. 
The default value is 10000 milliseconds (10 seconds).", + "format": "uint32" + }, + "useLegacySql": { + "type": "boolean", + "description": "Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is set to false, the values of allowLargeResults and flattenResults are ignored; query will be run as if allowLargeResults is true and flattenResults is false.", + "default": "true" + }, + "useQueryCache": { + "type": "boolean", + "description": "[Optional] Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. The default value is true.", + "default": "true" + } + } + }, + "QueryResponse": { + "id": "QueryResponse", + "type": "object", + "properties": { + "cacheHit": { + "type": "boolean", + "description": "Whether the query result was fetched from the query cache." + }, + "errors": { + "type": "array", + "description": "[Output-only] All errors and warnings encountered during the running of the job. Errors here do not necessarily mean that the job has completed or was unsuccessful.", + "items": { + "$ref": "ErrorProto" + } + }, + "jobComplete": { + "type": "boolean", + "description": "Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available." + }, + "jobReference": { + "$ref": "JobReference", + "description": "Reference to the Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults)." + }, + "kind": { + "type": "string", + "description": "The resource type.", + "default": "bigquery#queryResponse" + }, + "numDmlAffectedRows": { + "type": "string", + "description": "[Output-only, Experimental] The number of rows affected by a DML statement. Present only for DML statements INSERT, UPDATE or DELETE.", + "format": "int64" + }, + "pageToken": { + "type": "string", + "description": "A token used for paging results." + }, + "rows": { + "type": "array", + "description": "An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above.", + "items": { + "$ref": "TableRow" + } + }, + "schema": { + "$ref": "TableSchema", + "description": "The schema of the results. Present only when the query completes successfully." + }, + "totalBytesProcessed": { + "type": "string", + "description": "The total number of bytes processed for this query. 
If this query was a dry run, this is the number of bytes that would be processed if the query were run.", + "format": "int64" + }, + "totalRows": { + "type": "string", + "description": "The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results.", + "format": "uint64" + } + } + }, + "Streamingbuffer": { + "id": "Streamingbuffer", + "type": "object", + "properties": { + "estimatedBytes": { + "type": "string", + "description": "[Output-only] A lower-bound estimate of the number of bytes currently in the streaming buffer.", + "format": "uint64" + }, + "estimatedRows": { + "type": "string", + "description": "[Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer.", + "format": "uint64" + }, + "oldestEntryTime": { + "type": "string", + "description": "[Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available.", + "format": "uint64" + } + } + }, + "Table": { + "id": "Table", + "type": "object", + "properties": { + "creationTime": { + "type": "string", + "description": "[Output-only] The time when this table was created, in milliseconds since the epoch.", + "format": "int64" + }, + "description": { + "type": "string", + "description": "[Optional] A user-friendly description of this table." + }, + "etag": { + "type": "string", + "description": "[Output-only] A hash of this resource." + }, + "expirationTime": { + "type": "string", + "description": "[Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.", + "format": "int64" + }, + "externalDataConfiguration": { + "$ref": "ExternalDataConfiguration", + "description": "[Optional] Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table." + }, + "friendlyName": { + "type": "string", + "description": "[Optional] A descriptive name for this table." + }, + "id": { + "type": "string", + "description": "[Output-only] An opaque ID uniquely identifying the table." + }, + "kind": { + "type": "string", + "description": "[Output-only] The type of the resource.", + "default": "bigquery#table" + }, + "labels": { + "type": "object", + "description": "[Experimental] The labels associated with this table. You can use these to organize and group your tables. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.", + "additionalProperties": { + "type": "string" + } + }, + "lastModifiedTime": { + "type": "string", + "description": "[Output-only] The time when this table was last modified, in milliseconds since the epoch.", + "format": "uint64" + }, + "location": { + "type": "string", + "description": "[Output-only] The geographic location where the table resides. This value is inherited from the dataset." 
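The QueryRequest/QueryResponse pair above backs the synchronous jobs.query method. A sketch that runs a query and walks the first page of rows, still assuming the generated Go client; the SQL and project ID are placeholders, and ForceSendFields makes the client transmit an explicit useLegacySql=false, which the Go zero value would otherwise omit:

```
// runQuery issues jobs.query and prints the first page of results using the
// TableRow/TableCell shapes defined later in this file.
func runQuery(svc *bigquery.Service, projectID string) error {
	req := &bigquery.QueryRequest{
		Query:           "SELECT name, value FROM my_dataset.my_table", // placeholder SQL
		UseLegacySql:    false,
		ForceSendFields: []string{"UseLegacySql"},
		TimeoutMs:       10000, // the documented default
	}
	resp, err := svc.Jobs.Query(projectID, req).Do()
	if err != nil {
		return err
	}
	if !resp.JobComplete {
		// The request timed out before the query finished; follow up with
		// jobs.getQueryResults using resp.JobReference.
		return fmt.Errorf("query %s still running", resp.JobReference.JobId)
	}
	for _, row := range resp.Rows {
		for _, cell := range row.F {
			fmt.Printf("%v\t", cell.V)
		}
		fmt.Println()
	}
	return nil
}
```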
+ }, + "numBytes": { + "type": "string", + "description": "[Output-only] The size of this table in bytes, excluding any data in the streaming buffer.", + "format": "int64" + }, + "numLongTermBytes": { + "type": "string", + "description": "[Output-only] The number of bytes in the table that are considered \"long-term storage\".", + "format": "int64" + }, + "numRows": { + "type": "string", + "description": "[Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.", + "format": "uint64" + }, + "schema": { + "$ref": "TableSchema", + "description": "[Optional] Describes the schema of this table." + }, + "selfLink": { + "type": "string", + "description": "[Output-only] A URL that can be used to access this resource again." + }, + "streamingBuffer": { + "$ref": "Streamingbuffer", + "description": "[Output-only] Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer." + }, + "tableReference": { + "$ref": "TableReference", + "description": "[Required] Reference describing the ID of this table." + }, + "timePartitioning": { + "$ref": "TimePartitioning", + "description": "[Experimental] If specified, configures time-based partitioning for this table." + }, + "type": { + "type": "string", + "description": "[Output-only] Describes the table type. The following values are supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined by a SQL query. EXTERNAL: A table that references data stored in an external storage system, such as Google Cloud Storage. The default value is TABLE." + }, + "view": { + "$ref": "ViewDefinition", + "description": "[Optional] The view definition." + } + } + }, + "TableCell": { + "id": "TableCell", + "type": "object", + "properties": { + "v": { + "type": "any" + } + } + }, + "TableDataInsertAllRequest": { + "id": "TableDataInsertAllRequest", + "type": "object", + "properties": { + "ignoreUnknownValues": { + "type": "boolean", + "description": "[Optional] Accept rows that contain values that do not match the schema. The unknown values are ignored. Default is false, which treats unknown values as errors." + }, + "kind": { + "type": "string", + "description": "The resource type of the response.", + "default": "bigquery#tableDataInsertAllRequest" + }, + "rows": { + "type": "array", + "description": "The rows to insert.", + "items": { + "type": "object", + "properties": { + "insertId": { + "type": "string", + "description": "[Optional] A unique ID for each row. BigQuery uses this property to detect duplicate insertion requests on a best-effort basis." + }, + "json": { + "$ref": "JsonObject", + "description": "[Required] A JSON object that contains a row of data. The object's properties and values must match the destination table's schema." + } + } + } + }, + "skipInvalidRows": { + "type": "boolean", + "description": "[Optional] Insert all valid rows of a request, even if invalid rows exist. The default value is false, which causes the entire request to fail if any invalid rows exist." + }, + "templateSuffix": { + "type": "string", + "description": "[Experimental] If specified, treats the destination table as a base template, and inserts the rows into an instance table named \"{destination}{templateSuffix}\". BigQuery will manage creation of the instance table, using the schema of the base template table. 
See https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables for considerations when working with template tables." + } + } + }, + "TableDataInsertAllResponse": { + "id": "TableDataInsertAllResponse", + "type": "object", + "properties": { + "insertErrors": { + "type": "array", + "description": "An array of errors for rows that were not inserted.", + "items": { + "type": "object", + "properties": { + "errors": { + "type": "array", + "description": "Error information for the row indicated by the index property.", + "items": { + "$ref": "ErrorProto" + } + }, + "index": { + "type": "integer", + "description": "The index of the row that the error applies to.", + "format": "uint32" + } + } + } + }, + "kind": { + "type": "string", + "description": "The resource type of the response.", + "default": "bigquery#tableDataInsertAllResponse" + } + } + }, + "TableDataList": { + "id": "TableDataList", + "type": "object", + "properties": { + "etag": { + "type": "string", + "description": "A hash of this page of results." + }, + "kind": { + "type": "string", + "description": "The resource type of the response.", + "default": "bigquery#tableDataList" + }, + "pageToken": { + "type": "string", + "description": "A token used for paging results. Providing this token instead of the startIndex parameter can help you retrieve stable results when an underlying table is changing." + }, + "rows": { + "type": "array", + "description": "Rows of results.", + "items": { + "$ref": "TableRow" + } + }, + "totalRows": { + "type": "string", + "description": "The total number of rows in the complete table.", + "format": "int64" + } + } + }, + "TableFieldSchema": { + "id": "TableFieldSchema", + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "[Optional] The field description. The maximum length is 16K characters." + }, + "fields": { + "type": "array", + "description": "[Optional] Describes the nested schema fields if the type property is set to RECORD.", + "items": { + "$ref": "TableFieldSchema" + } + }, + "mode": { + "type": "string", + "description": "[Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE." + }, + "name": { + "type": "string", + "description": "[Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters." + }, + "type": { + "type": "string", + "description": "[Required] The field data type. Possible values include STRING, BYTES, INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT), BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, RECORD (where RECORD indicates that the field contains a nested schema) or STRUCT (same as RECORD)." + } + } + }, + "TableList": { + "id": "TableList", + "type": "object", + "properties": { + "etag": { + "type": "string", + "description": "A hash of this page of results." + }, + "kind": { + "type": "string", + "description": "The type of list.", + "default": "bigquery#tableList" + }, + "nextPageToken": { + "type": "string", + "description": "A token to request the next page of results." + }, + "tables": { + "type": "array", + "description": "Tables in the requested dataset.", + "items": { + "type": "object", + "properties": { + "friendlyName": { + "type": "string", + "description": "The user-friendly name for this table." 
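TableDataInsertAllRequest and its response above back the streaming tabledata.insertAll method. A hedged sketch of streaming two rows, with placeholder table coordinates; insertId gives BigQuery a best-effort deduplication key across retries, and per-row failures come back in insertErrors rather than as a transport error:

```
// streamRows streams two rows into an existing table via tabledata.insertAll.
func streamRows(svc *bigquery.Service, projectID, datasetID, tableID string) error {
	req := &bigquery.TableDataInsertAllRequest{
		Rows: []*bigquery.TableDataInsertAllRequestRows{
			{InsertId: "row-1", Json: map[string]bigquery.JsonValue{"name": "a", "value": 1}},
			{InsertId: "row-2", Json: map[string]bigquery.JsonValue{"name": "b", "value": 2}},
		},
	}
	resp, err := svc.Tabledata.InsertAll(projectID, datasetID, tableID, req).Do()
	if err != nil {
		return err
	}
	// InsertErrors is indexed into Rows; the call as a whole can still
	// succeed while individual rows are rejected.
	for _, e := range resp.InsertErrors {
		log.Printf("row %d rejected: %+v", e.Index, e.Errors)
	}
	return nil
}
```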
+ }, + "id": { + "type": "string", + "description": "An opaque ID of the table" + }, + "kind": { + "type": "string", + "description": "The resource type.", + "default": "bigquery#table" + }, + "labels": { + "type": "object", + "description": "[Experimental] The labels associated with this table. You can use these to organize and group your tables.", + "additionalProperties": { + "type": "string" + } + }, + "tableReference": { + "$ref": "TableReference", + "description": "A reference uniquely identifying the table." + }, + "type": { + "type": "string", + "description": "The type of table. Possible values are: TABLE, VIEW." + }, + "view": { + "type": "object", + "description": "Additional details for a view.", + "properties": { + "useLegacySql": { + "type": "boolean", + "description": "True if view is defined in legacy SQL dialect, false if in standard SQL." + } + } + } + } + } + }, + "totalItems": { + "type": "integer", + "description": "The total number of tables in the dataset.", + "format": "int32" + } + } + }, + "TableReference": { + "id": "TableReference", + "type": "object", + "properties": { + "datasetId": { + "type": "string", + "description": "[Required] The ID of the dataset containing this table.", + "annotations": { + "required": [ + "bigquery.tables.update" + ] + } + }, + "projectId": { + "type": "string", + "description": "[Required] The ID of the project containing this table.", + "annotations": { + "required": [ + "bigquery.tables.update" + ] + } + }, + "tableId": { + "type": "string", + "description": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.", + "annotations": { + "required": [ + "bigquery.tables.update" + ] + } + } + } + }, + "TableRow": { + "id": "TableRow", + "type": "object", + "properties": { + "f": { + "type": "array", + "description": "Represents a single row in the result set, consisting of one or more fields.", + "items": { + "$ref": "TableCell" + } + } + } + }, + "TableSchema": { + "id": "TableSchema", + "type": "object", + "properties": { + "fields": { + "type": "array", + "description": "Describes the fields in a table.", + "items": { + "$ref": "TableFieldSchema" + } + } + } + }, + "TimePartitioning": { + "id": "TimePartitioning", + "type": "object", + "properties": { + "expirationMs": { + "type": "string", + "description": "[Optional] Number of milliseconds for which to keep the storage for a partition.", + "format": "int64" + }, + "type": { + "type": "string", + "description": "[Required] The only type supported is DAY, which will generate one partition per day based on data loading time." + } + } + }, + "UserDefinedFunctionResource": { + "id": "UserDefinedFunctionResource", + "type": "object", + "properties": { + "inlineCode": { + "type": "string", + "description": "[Pick one] An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code." + }, + "resourceUri": { + "type": "string", + "description": "[Pick one] A code resource to load from a Google Cloud Storage URI (gs://bucket/path)." + } + } + }, + "ViewDefinition": { + "id": "ViewDefinition", + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "[Required] A query that BigQuery executes when the view is referenced." + }, + "useLegacySql": { + "type": "boolean", + "description": "Specifies whether to use BigQuery's legacy SQL for this view. 
The default value is true. If set to false, the view will use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-reference/ Queries and views that reference this view must use the same flag value." + }, + "userDefinedFunctionResources": { + "type": "array", + "description": "[Experimental] Describes user-defined function resources used in the query.", + "items": { + "$ref": "UserDefinedFunctionResource" + } + } + } + } + }, + "resources": { + "datasets": { + "methods": { + "delete": { + "id": "bigquery.datasets.delete", + "path": "projects/{projectId}/datasets/{datasetId}", + "httpMethod": "DELETE", + "description": "Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of dataset being deleted", + "required": true, + "location": "path" + }, + "deleteContents": { + "type": "boolean", + "description": "If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. Default is False", + "location": "query" + }, + "projectId": { + "type": "string", + "description": "Project ID of the dataset being deleted", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId" + ], + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "id": "bigquery.datasets.get", + "path": "projects/{projectId}/datasets/{datasetId}", + "httpMethod": "GET", + "description": "Returns the dataset specified by datasetID.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of the requested dataset", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "Project ID of the requested dataset", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId" + ], + "response": { + "$ref": "Dataset" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "insert": { + "id": "bigquery.datasets.insert", + "path": "projects/{projectId}/datasets", + "httpMethod": "POST", + "description": "Creates a new empty dataset.", + "parameters": { + "projectId": { + "type": "string", + "description": "Project ID of the new dataset", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId" + ], + "request": { + "$ref": "Dataset" + }, + "response": { + "$ref": "Dataset" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "id": "bigquery.datasets.list", + "path": "projects/{projectId}/datasets", + "httpMethod": "GET", + "description": "Lists all datasets in the specified project to which you have been granted the READER dataset role.", + "parameters": { + "all": { + "type": "boolean", + "description": "Whether to list all datasets, including hidden ones", + "location": "query" + }, + "filter": { + "type": "string", + "description": "An expression for filtering the results of the request by label. The syntax is \"labels.\u003cname\u003e[:\u003cvalue\u003e]\". Multiple filters can be ANDed together by connecting with a space. 
Example: \"labels.department:receiving labels.active\". See Filtering datasets using labels for details.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results to return", + "format": "uint32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Page token, returned by a previous call, to request the next page of results", + "location": "query" + }, + "projectId": { + "type": "string", + "description": "Project ID of the datasets to be listed", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId" + ], + "response": { + "$ref": "DatasetList" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "patch": { + "id": "bigquery.datasets.patch", + "path": "projects/{projectId}/datasets/{datasetId}", + "httpMethod": "PATCH", + "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports patch semantics.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of the dataset being updated", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "Project ID of the dataset being updated", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId" + ], + "request": { + "$ref": "Dataset" + }, + "response": { + "$ref": "Dataset" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "update": { + "id": "bigquery.datasets.update", + "path": "projects/{projectId}/datasets/{datasetId}", + "httpMethod": "PUT", + "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of the dataset being updated", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "Project ID of the dataset being updated", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId" + ], + "request": { + "$ref": "Dataset" + }, + "response": { + "$ref": "Dataset" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "jobs": { + "methods": { + "cancel": { + "id": "bigquery.jobs.cancel", + "path": "projects/{projectId}/jobs/{jobId}/cancel", + "httpMethod": "POST", + "description": "Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully. 
Cancelled jobs may still incur costs.", + "parameters": { + "jobId": { + "type": "string", + "description": "[Required] Job ID of the job to cancel", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "[Required] Project ID of the job to cancel", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "jobId" + ], + "response": { + "$ref": "JobCancelResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "id": "bigquery.jobs.get", + "path": "projects/{projectId}/jobs/{jobId}", + "httpMethod": "GET", + "description": "Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.", + "parameters": { + "jobId": { + "type": "string", + "description": "[Required] Job ID of the requested job", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "[Required] Project ID of the requested job", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "jobId" + ], + "response": { + "$ref": "Job" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "getQueryResults": { + "id": "bigquery.jobs.getQueryResults", + "path": "projects/{projectId}/queries/{jobId}", + "httpMethod": "GET", + "description": "Retrieves the results of a query job.", + "parameters": { + "jobId": { + "type": "string", + "description": "[Required] Job ID of the query job", + "required": true, + "location": "path" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of results to read", + "format": "uint32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Page token, returned by a previous call, to request the next page of results", + "location": "query" + }, + "projectId": { + "type": "string", + "description": "[Required] Project ID of the query job", + "required": true, + "location": "path" + }, + "startIndex": { + "type": "string", + "description": "Zero-based index of the starting row", + "format": "uint64", + "location": "query" + }, + "timeoutMs": { + "type": "integer", + "description": "How long to wait for the query to complete, in milliseconds, before returning. Default is 10 seconds. If the timeout passes before the job completes, the 'jobComplete' field in the response will be false", + "format": "uint32", + "location": "query" + } + }, + "parameterOrder": [ + "projectId", + "jobId" + ], + "response": { + "$ref": "GetQueryResultsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "insert": { + "id": "bigquery.jobs.insert", + "path": "projects/{projectId}/jobs", + "httpMethod": "POST", + "description": "Starts a new asynchronous job. 
Requires the Can View project role.", + "parameters": { + "projectId": { + "type": "string", + "description": "Project ID of the project that will be billed for the job", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId" + ], + "request": { + "$ref": "Job" + }, + "response": { + "$ref": "Job" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaUpload": true, + "mediaUpload": { + "accept": [ + "*/*" + ], + "protocols": { + "simple": { + "multipart": true, + "path": "/upload/bigquery/v2/projects/{projectId}/jobs" + }, + "resumable": { + "multipart": true, + "path": "/resumable/upload/bigquery/v2/projects/{projectId}/jobs" + } + } + } + }, + "list": { + "id": "bigquery.jobs.list", + "path": "projects/{projectId}/jobs", + "httpMethod": "GET", + "description": "Lists all jobs that you started in the specified project. Job information is available for a six month period after creation. The job list is sorted in reverse chronological order, by job creation time. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.", + "parameters": { + "allUsers": { + "type": "boolean", + "description": "Whether to display jobs owned by all users in the project. Default false", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of results to return", + "format": "uint32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Page token, returned by a previous call, to request the next page of results", + "location": "query" + }, + "projectId": { + "type": "string", + "description": "Project ID of the jobs to list", + "required": true, + "location": "path" + }, + "projection": { + "type": "string", + "description": "Restrict information returned to a set of selected fields", + "enum": [ + "full", + "minimal" + ], + "enumDescriptions": [ + "Includes all job data", + "Does not include the job configuration" + ], + "location": "query" + }, + "stateFilter": { + "type": "string", + "description": "Filter for job state", + "enum": [ + "done", + "pending", + "running" + ], + "enumDescriptions": [ + "Finished jobs", + "Pending jobs", + "Running jobs" + ], + "repeated": true, + "location": "query" + } + }, + "parameterOrder": [ + "projectId" + ], + "response": { + "$ref": "JobList" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "query": { + "id": "bigquery.jobs.query", + "path": "projects/{projectId}/queries", + "httpMethod": "POST", + "description": "Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.", + "parameters": { + "projectId": { + "type": "string", + "description": "Project ID of the project billed for the query", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId" + ], + "request": { + "$ref": "QueryRequest" + }, + "response": { + "$ref": "QueryResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + } + } + }, + "projects": { + 
"methods": { + "list": { + "id": "bigquery.projects.list", + "path": "projects", + "httpMethod": "GET", + "description": "Lists all projects to which you have been granted any project role.", + "parameters": { + "maxResults": { + "type": "integer", + "description": "Maximum number of results to return", + "format": "uint32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Page token, returned by a previous call, to request the next page of results", + "location": "query" + } + }, + "response": { + "$ref": "ProjectList" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + } + } + }, + "tabledata": { + "methods": { + "insertAll": { + "id": "bigquery.tabledata.insertAll", + "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll", + "httpMethod": "POST", + "description": "Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of the destination table.", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "Project ID of the destination table.", + "required": true, + "location": "path" + }, + "tableId": { + "type": "string", + "description": "Table ID of the destination table.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId", + "tableId" + ], + "request": { + "$ref": "TableDataInsertAllRequest" + }, + "response": { + "$ref": "TableDataInsertAllResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/bigquery.insertdata", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "id": "bigquery.tabledata.list", + "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data", + "httpMethod": "GET", + "description": "Retrieves table data from a specified set of rows. 
Requires the READER dataset role.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of the table to read", + "required": true, + "location": "path" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of results to return", + "format": "uint32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Page token, returned by a previous call, identifying the result set", + "location": "query" + }, + "projectId": { + "type": "string", + "description": "Project ID of the table to read", + "required": true, + "location": "path" + }, + "startIndex": { + "type": "string", + "description": "Zero-based index of the starting row to read", + "format": "uint64", + "location": "query" + }, + "tableId": { + "type": "string", + "description": "Table ID of the table to read", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId", + "tableId" + ], + "response": { + "$ref": "TableDataList" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + } + } + }, + "tables": { + "methods": { + "delete": { + "id": "bigquery.tables.delete", + "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", + "httpMethod": "DELETE", + "description": "Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of the table to delete", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "Project ID of the table to delete", + "required": true, + "location": "path" + }, + "tableId": { + "type": "string", + "description": "Table ID of the table to delete", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId", + "tableId" + ], + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "id": "bigquery.tables.get", + "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", + "httpMethod": "GET", + "description": "Gets the specified table resource by table ID. 
This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of the requested table", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "Project ID of the requested table", + "required": true, + "location": "path" + }, + "tableId": { + "type": "string", + "description": "Table ID of the requested table", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId", + "tableId" + ], + "response": { + "$ref": "Table" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "insert": { + "id": "bigquery.tables.insert", + "path": "projects/{projectId}/datasets/{datasetId}/tables", + "httpMethod": "POST", + "description": "Creates a new, empty table in the dataset.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of the new table", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "Project ID of the new table", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId" + ], + "request": { + "$ref": "Table" + }, + "response": { + "$ref": "Table" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "id": "bigquery.tables.list", + "path": "projects/{projectId}/datasets/{datasetId}/tables", + "httpMethod": "GET", + "description": "Lists all tables in the specified dataset. Requires the READER dataset role.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of the tables to list", + "required": true, + "location": "path" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of results to return", + "format": "uint32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Page token, returned by a previous call, to request the next page of results", + "location": "query" + }, + "projectId": { + "type": "string", + "description": "Project ID of the tables to list", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId" + ], + "response": { + "$ref": "TableList" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "patch": { + "id": "bigquery.tables.patch", + "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", + "httpMethod": "PATCH", + "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. 
This method supports patch semantics.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of the table to update", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "Project ID of the table to update", + "required": true, + "location": "path" + }, + "tableId": { + "type": "string", + "description": "Table ID of the table to update", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId", + "tableId" + ], + "request": { + "$ref": "Table" + }, + "response": { + "$ref": "Table" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "update": { + "id": "bigquery.tables.update", + "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", + "httpMethod": "PUT", + "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.", + "parameters": { + "datasetId": { + "type": "string", + "description": "Dataset ID of the table to update", + "required": true, + "location": "path" + }, + "projectId": { + "type": "string", + "description": "Project ID of the table to update", + "required": true, + "location": "path" + }, + "tableId": { + "type": "string", + "description": "Table ID of the table to update", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "datasetId", + "tableId" + ], + "request": { + "$ref": "Table" + }, + "response": { + "$ref": "Table" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } +} diff --git a/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go b/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go new file mode 100644 index 000000000..b907ac827 --- /dev/null +++ b/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go @@ -0,0 +1,6690 @@ +// Package bigquery provides access to the BigQuery API. +// +// See https://cloud.google.com/bigquery/ +// +// Usage example: +// +// import "google.golang.org/api/bigquery/v2" +// ... +// bigqueryService, err := bigquery.New(oauthHttpClient) +package bigquery // import "google.golang.org/api/bigquery/v2" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "bigquery:v2" +const apiName = "bigquery" +const apiVersion = "v2" +const basePath = "https://www.googleapis.com/bigquery/v2/" + +// OAuth2 scopes used by this API. 
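+//
+// As an illustrative sketch only (golang.org/x/oauth2/google is assumed
+// here and is not imported by this package), an authorized client for
+// one of these scopes could be obtained and used as:
+//
+//   httpClient, err := google.DefaultClient(ctx, BigqueryScope)
+//   if err != nil { ... }
+//   bigqueryService, err := New(httpClient)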
+const ( + // View and manage your data in Google BigQuery + BigqueryScope = "https://www.googleapis.com/auth/bigquery" + + // Insert data into Google BigQuery + BigqueryInsertdataScope = "https://www.googleapis.com/auth/bigquery.insertdata" + + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" + + // Manage your data and permissions in Google Cloud Storage + DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" + + // View your data in Google Cloud Storage + DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" + + // Manage your data in Google Cloud Storage + DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Datasets = NewDatasetsService(s) + s.Jobs = NewJobsService(s) + s.Projects = NewProjectsService(s) + s.Tabledata = NewTabledataService(s) + s.Tables = NewTablesService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + GoogleClientHeaderElement string // client header fragment, for Google use only + + Datasets *DatasetsService + + Jobs *JobsService + + Projects *ProjectsService + + Tabledata *TabledataService + + Tables *TablesService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func (s *Service) clientHeader() string { + return gensupport.GoogleClientHeader("20170210", s.GoogleClientHeaderElement) +} + +func NewDatasetsService(s *Service) *DatasetsService { + rs := &DatasetsService{s: s} + return rs +} + +type DatasetsService struct { + s *Service +} + +func NewJobsService(s *Service) *JobsService { + rs := &JobsService{s: s} + return rs +} + +type JobsService struct { + s *Service +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + return rs +} + +type ProjectsService struct { + s *Service +} + +func NewTabledataService(s *Service) *TabledataService { + rs := &TabledataService{s: s} + return rs +} + +type TabledataService struct { + s *Service +} + +func NewTablesService(s *Service) *TablesService { + rs := &TablesService{s: s} + return rs +} + +type TablesService struct { + s *Service +} + +type BigtableColumn struct { + // Encoding: [Optional] The encoding of the values when the type is not + // STRING. Acceptable encoding values are: TEXT - indicates values are + // alphanumeric text strings. BINARY - indicates values are encoded + // using HBase Bytes.toBytes family of functions. 'encoding' can also be + // set at the column family level. However, the setting at this level + // takes precedence if 'encoding' is set at both levels. + Encoding string `json:"encoding,omitempty"` + + // FieldName: [Optional] If the qualifier is not a valid BigQuery field + // identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid + // identifier must be provided as the column field name and is used as + // field name in queries. 
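+	// For example (illustrative values only): a Bigtable qualifier such
+	// as "col-1" is not a valid BigQuery identifier, so a name like
+	// "col_1" could be supplied here instead.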
+ FieldName string `json:"fieldName,omitempty"` + + // OnlyReadLatest: [Optional] If this is set, only the latest version of + // value in this column are exposed. 'onlyReadLatest' can also be set at + // the column family level. However, the setting at this level takes + // precedence if 'onlyReadLatest' is set at both levels. + OnlyReadLatest bool `json:"onlyReadLatest,omitempty"` + + // QualifierEncoded: [Required] Qualifier of the column. Columns in the + // parent column family that has this exact qualifier are exposed as . + // field. If the qualifier is valid UTF-8 string, it can be specified in + // the qualifier_string field. Otherwise, a base-64 encoded value must + // be set to qualifier_encoded. The column field name is the same as the + // column qualifier. However, if the qualifier is not a valid BigQuery + // field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid + // identifier must be provided as field_name. + QualifierEncoded string `json:"qualifierEncoded,omitempty"` + + QualifierString string `json:"qualifierString,omitempty"` + + // Type: [Optional] The type to convert the value in cells of this + // column. The values are expected to be encoded using HBase + // Bytes.toBytes function when using the BINARY encoding value. + // Following BigQuery types are allowed (case-sensitive) - BYTES STRING + // INTEGER FLOAT BOOLEAN Default type is BYTES. 'type' can also be set + // at the column family level. However, the setting at this level takes + // precedence if 'type' is set at both levels. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Encoding") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Encoding") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BigtableColumn) MarshalJSON() ([]byte, error) { + type noMethod BigtableColumn + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BigtableColumnFamily struct { + // Columns: [Optional] Lists of columns that should be exposed as + // individual fields as opposed to a list of (column name, value) pairs. + // All columns whose qualifier matches a qualifier in this list can be + // accessed as .. Other columns can be accessed as a list through + // .Column field. + Columns []*BigtableColumn `json:"columns,omitempty"` + + // Encoding: [Optional] The encoding of the values when the type is not + // STRING. Acceptable encoding values are: TEXT - indicates values are + // alphanumeric text strings. BINARY - indicates values are encoded + // using HBase Bytes.toBytes family of functions. This can be overridden + // for a specific column by listing that column in 'columns' and + // specifying an encoding for it. 
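+	// For example (illustrative): a family could set this to "BINARY"
+	// while a single text column listed in Columns overrides it with
+	// "TEXT".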
+ Encoding string `json:"encoding,omitempty"` + + // FamilyId: Identifier of the column family. + FamilyId string `json:"familyId,omitempty"` + + // OnlyReadLatest: [Optional] If this is set only the latest version of + // value are exposed for all columns in this column family. This can be + // overridden for a specific column by listing that column in 'columns' + // and specifying a different setting for that column. + OnlyReadLatest bool `json:"onlyReadLatest,omitempty"` + + // Type: [Optional] The type to convert the value in cells of this + // column family. The values are expected to be encoded using HBase + // Bytes.toBytes function when using the BINARY encoding value. + // Following BigQuery types are allowed (case-sensitive) - BYTES STRING + // INTEGER FLOAT BOOLEAN Default type is BYTES. This can be overridden + // for a specific column by listing that column in 'columns' and + // specifying a type for it. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Columns") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Columns") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BigtableColumnFamily) MarshalJSON() ([]byte, error) { + type noMethod BigtableColumnFamily + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BigtableOptions struct { + // ColumnFamilies: [Optional] List of column families to expose in the + // table schema along with their types. This list restricts the column + // families that can be referenced in queries and specifies their value + // types. You can use this list to do type conversions - see the 'type' + // field for more details. If you leave this list empty, all column + // families are present in the table schema and their values are read as + // BYTES. During a query only the column families referenced in that + // query are read from Bigtable. + ColumnFamilies []*BigtableColumnFamily `json:"columnFamilies,omitempty"` + + // IgnoreUnspecifiedColumnFamilies: [Optional] If field is true, then + // the column families that are not specified in columnFamilies list are + // not exposed in the table schema. Otherwise, they are read with BYTES + // type values. The default value is false. + IgnoreUnspecifiedColumnFamilies bool `json:"ignoreUnspecifiedColumnFamilies,omitempty"` + + // ReadRowkeyAsString: [Optional] If field is true, then the rowkey + // column families will be read and converted to string. Otherwise they + // are read with BYTES type values and users need to manually cast them + // with CAST if necessary. The default value is false. + ReadRowkeyAsString bool `json:"readRowkeyAsString,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ColumnFamilies") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ColumnFamilies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BigtableOptions) MarshalJSON() ([]byte, error) { + type noMethod BigtableOptions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CsvOptions struct { + // AllowJaggedRows: [Optional] Indicates if BigQuery should accept rows + // that are missing trailing optional columns. If true, BigQuery treats + // missing trailing columns as null values. If false, records with + // missing trailing columns are treated as bad records, and if there are + // too many bad records, an invalid error is returned in the job result. + // The default value is false. + AllowJaggedRows bool `json:"allowJaggedRows,omitempty"` + + // AllowQuotedNewlines: [Optional] Indicates if BigQuery should allow + // quoted data sections that contain newline characters in a CSV file. + // The default value is false. + AllowQuotedNewlines bool `json:"allowQuotedNewlines,omitempty"` + + // Encoding: [Optional] The character encoding of the data. The + // supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. + // BigQuery decodes the data after the raw, binary data has been split + // using the values of the quote and fieldDelimiter properties. + Encoding string `json:"encoding,omitempty"` + + // FieldDelimiter: [Optional] The separator for fields in a CSV file. + // BigQuery converts the string to ISO-8859-1 encoding, and then uses + // the first byte of the encoded string to split the data in its raw, + // binary state. BigQuery also supports the escape sequence "\t" to + // specify a tab separator. The default value is a comma (','). + FieldDelimiter string `json:"fieldDelimiter,omitempty"` + + // Quote: [Optional] The value that is used to quote data sections in a + // CSV file. BigQuery converts the string to ISO-8859-1 encoding, and + // then uses the first byte of the encoded string to split the data in + // its raw, binary state. The default value is a double-quote ('"'). If + // your data does not contain quoted sections, set the property value to + // an empty string. If your data contains quoted newline characters, you + // must also set the allowQuotedNewlines property to true. + // + // Default: " + Quote *string `json:"quote,omitempty"` + + // SkipLeadingRows: [Optional] The number of rows at the top of a CSV + // file that BigQuery will skip when reading the data. The default value + // is 0. This property is useful if you have header rows in the file + // that should be skipped. + SkipLeadingRows int64 `json:"skipLeadingRows,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "AllowJaggedRows") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowJaggedRows") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *CsvOptions) MarshalJSON() ([]byte, error) { + type noMethod CsvOptions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Dataset struct { + // Access: [Optional] An array of objects that define dataset access for + // one or more entities. You can set this property when inserting or + // updating a dataset in order to control who is allowed to access the + // data. If unspecified at dataset creation time, BigQuery adds default + // dataset access for the following entities: access.specialGroup: + // projectReaders; access.role: READER; access.specialGroup: + // projectWriters; access.role: WRITER; access.specialGroup: + // projectOwners; access.role: OWNER; access.userByEmail: [dataset + // creator email]; access.role: OWNER; + Access []*DatasetAccess `json:"access,omitempty"` + + // CreationTime: [Output-only] The time when this dataset was created, + // in milliseconds since the epoch. + CreationTime int64 `json:"creationTime,omitempty,string"` + + // DatasetReference: [Required] A reference that identifies the dataset. + DatasetReference *DatasetReference `json:"datasetReference,omitempty"` + + // DefaultTableExpirationMs: [Optional] The default lifetime of all + // tables in the dataset, in milliseconds. The minimum value is 3600000 + // milliseconds (one hour). Once this property is set, all newly-created + // tables in the dataset will have an expirationTime property set to the + // creation time plus the value in this property, and changing the value + // will only affect new tables, not existing ones. When the + // expirationTime for a given table is reached, that table will be + // deleted automatically. If a table's expirationTime is modified or + // removed before the table expires, or if you provide an explicit + // expirationTime when creating a table, that value takes precedence + // over the default expiration time indicated by this property. + DefaultTableExpirationMs int64 `json:"defaultTableExpirationMs,omitempty,string"` + + // Description: [Optional] A user-friendly description of the dataset. + Description string `json:"description,omitempty"` + + // Etag: [Output-only] A hash of the resource. + Etag string `json:"etag,omitempty"` + + // FriendlyName: [Optional] A descriptive name for the dataset. + FriendlyName string `json:"friendlyName,omitempty"` + + // Id: [Output-only] The fully-qualified unique name of the dataset in + // the format projectId:datasetId. The dataset name without the project + // name is given in the datasetId field. When creating a new dataset, + // leave this field blank, and instead specify the datasetId field. + Id string `json:"id,omitempty"` + + // Kind: [Output-only] The resource type. 
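+	// For dataset resources this is the value "bigquery#dataset".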
+ Kind string `json:"kind,omitempty"` + + // Labels: [Experimental] The labels associated with this dataset. You + // can use these to organize and group your datasets. You can set this + // property when inserting or updating a dataset. See Labeling Datasets + // for more information. + Labels map[string]string `json:"labels,omitempty"` + + // LastModifiedTime: [Output-only] The date when this dataset or any of + // its tables was last modified, in milliseconds since the epoch. + LastModifiedTime int64 `json:"lastModifiedTime,omitempty,string"` + + // Location: [Experimental] The geographic location where the dataset + // should reside. Possible values include EU and US. The default value + // is US. + Location string `json:"location,omitempty"` + + // SelfLink: [Output-only] A URL that can be used to access the resource + // again. You can use this URL in Get or Update requests to the + // resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Access") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Access") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Dataset) MarshalJSON() ([]byte, error) { + type noMethod Dataset + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DatasetAccess struct { + // Domain: [Pick one] A domain to grant access to. Any users signed in + // with the domain specified will be granted the specified access. + // Example: "example.com". + Domain string `json:"domain,omitempty"` + + // GroupByEmail: [Pick one] An email address of a Google Group to grant + // access to. + GroupByEmail string `json:"groupByEmail,omitempty"` + + // Role: [Required] Describes the rights granted to the user specified + // by the other member of the access object. The following string values + // are supported: READER, WRITER, OWNER. + Role string `json:"role,omitempty"` + + // SpecialGroup: [Pick one] A special group to grant access to. Possible + // values include: projectOwners: Owners of the enclosing project. + // projectReaders: Readers of the enclosing project. projectWriters: + // Writers of the enclosing project. allAuthenticatedUsers: All + // authenticated BigQuery users. + SpecialGroup string `json:"specialGroup,omitempty"` + + // UserByEmail: [Pick one] An email address of a user to grant access + // to. For example: fred@example.com. + UserByEmail string `json:"userByEmail,omitempty"` + + // View: [Pick one] A view from a different dataset to grant access to. + // Queries executed against that view will have read access to tables in + // this dataset. The role field is not required when this field is set. 
+ // If that view is updated by any user, access to the view needs to be + // granted again via an update operation. + View *TableReference `json:"view,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Domain") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Domain") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DatasetAccess) MarshalJSON() ([]byte, error) { + type noMethod DatasetAccess + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DatasetList struct { + // Datasets: An array of the dataset resources in the project. Each + // resource contains basic information. For full information about a + // particular dataset resource, use the Datasets: get method. This + // property is omitted when there are no datasets in the project. + Datasets []*DatasetListDatasets `json:"datasets,omitempty"` + + // Etag: A hash value of the results page. You can use this property to + // determine if the page has changed since the last request. + Etag string `json:"etag,omitempty"` + + // Kind: The list type. This property always returns the value + // "bigquery#datasetList". + Kind string `json:"kind,omitempty"` + + // NextPageToken: A token that can be used to request the next results + // page. This property is omitted on the final results page. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Datasets") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Datasets") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DatasetList) MarshalJSON() ([]byte, error) { + type noMethod DatasetList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DatasetListDatasets struct { + // DatasetReference: The dataset reference. Use this property to access + // specific parts of the dataset's ID, such as project ID or dataset ID. 
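+	// For example: d.DatasetReference.ProjectId or
+	// d.DatasetReference.DatasetId (see DatasetReference below).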
+ DatasetReference *DatasetReference `json:"datasetReference,omitempty"` + + // FriendlyName: A descriptive name for the dataset, if one exists. + FriendlyName string `json:"friendlyName,omitempty"` + + // Id: The fully-qualified, unique, opaque ID of the dataset. + Id string `json:"id,omitempty"` + + // Kind: The resource type. This property always returns the value + // "bigquery#dataset". + Kind string `json:"kind,omitempty"` + + // Labels: [Experimental] The labels associated with this dataset. You + // can use these to organize and group your datasets. + Labels map[string]string `json:"labels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DatasetReference") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DatasetReference") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DatasetListDatasets) MarshalJSON() ([]byte, error) { + type noMethod DatasetListDatasets + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DatasetReference struct { + // DatasetId: [Required] A unique ID for this dataset, without the + // project name. The ID must contain only letters (a-z, A-Z), numbers + // (0-9), or underscores (_). The maximum length is 1,024 characters. + DatasetId string `json:"datasetId,omitempty"` + + // ProjectId: [Optional] The ID of the project containing this dataset. + ProjectId string `json:"projectId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DatasetId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DatasetId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DatasetReference) MarshalJSON() ([]byte, error) { + type noMethod DatasetReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ErrorProto struct { + // DebugInfo: Debugging information. This property is internal to Google + // and should not be used. + DebugInfo string `json:"debugInfo,omitempty"` + + // Location: Specifies where the error occurred, if present. 
+ Location string `json:"location,omitempty"` + + // Message: A human-readable description of the error. + Message string `json:"message,omitempty"` + + // Reason: A short error code that summarizes the error. + Reason string `json:"reason,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DebugInfo") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DebugInfo") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ErrorProto) MarshalJSON() ([]byte, error) { + type noMethod ErrorProto + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ExplainQueryStage struct { + // ComputeRatioAvg: Relative amount of time the average shard spent on + // CPU-bound tasks. + ComputeRatioAvg float64 `json:"computeRatioAvg,omitempty"` + + // ComputeRatioMax: Relative amount of time the slowest shard spent on + // CPU-bound tasks. + ComputeRatioMax float64 `json:"computeRatioMax,omitempty"` + + // Id: Unique ID for stage within plan. + Id int64 `json:"id,omitempty,string"` + + // Name: Human-readable name for stage. + Name string `json:"name,omitempty"` + + // ReadRatioAvg: Relative amount of time the average shard spent reading + // input. + ReadRatioAvg float64 `json:"readRatioAvg,omitempty"` + + // ReadRatioMax: Relative amount of time the slowest shard spent reading + // input. + ReadRatioMax float64 `json:"readRatioMax,omitempty"` + + // RecordsRead: Number of records read into the stage. + RecordsRead int64 `json:"recordsRead,omitempty,string"` + + // RecordsWritten: Number of records written by the stage. + RecordsWritten int64 `json:"recordsWritten,omitempty,string"` + + // Status: Current status for the stage. + Status string `json:"status,omitempty"` + + // Steps: List of operations within the stage in dependency order + // (approximately chronological). + Steps []*ExplainQueryStep `json:"steps,omitempty"` + + // WaitRatioAvg: Relative amount of time the average shard spent waiting + // to be scheduled. + WaitRatioAvg float64 `json:"waitRatioAvg,omitempty"` + + // WaitRatioMax: Relative amount of time the slowest shard spent waiting + // to be scheduled. + WaitRatioMax float64 `json:"waitRatioMax,omitempty"` + + // WriteRatioAvg: Relative amount of time the average shard spent on + // writing output. + WriteRatioAvg float64 `json:"writeRatioAvg,omitempty"` + + // WriteRatioMax: Relative amount of time the slowest shard spent on + // writing output. + WriteRatioMax float64 `json:"writeRatioMax,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ComputeRatioAvg") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ComputeRatioAvg") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ExplainQueryStage) MarshalJSON() ([]byte, error) { + type noMethod ExplainQueryStage + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *ExplainQueryStage) UnmarshalJSON(data []byte) error { + type noMethod ExplainQueryStage + var s1 struct { + ComputeRatioAvg gensupport.JSONFloat64 `json:"computeRatioAvg"` + ComputeRatioMax gensupport.JSONFloat64 `json:"computeRatioMax"` + ReadRatioAvg gensupport.JSONFloat64 `json:"readRatioAvg"` + ReadRatioMax gensupport.JSONFloat64 `json:"readRatioMax"` + WaitRatioAvg gensupport.JSONFloat64 `json:"waitRatioAvg"` + WaitRatioMax gensupport.JSONFloat64 `json:"waitRatioMax"` + WriteRatioAvg gensupport.JSONFloat64 `json:"writeRatioAvg"` + WriteRatioMax gensupport.JSONFloat64 `json:"writeRatioMax"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.ComputeRatioAvg = float64(s1.ComputeRatioAvg) + s.ComputeRatioMax = float64(s1.ComputeRatioMax) + s.ReadRatioAvg = float64(s1.ReadRatioAvg) + s.ReadRatioMax = float64(s1.ReadRatioMax) + s.WaitRatioAvg = float64(s1.WaitRatioAvg) + s.WaitRatioMax = float64(s1.WaitRatioMax) + s.WriteRatioAvg = float64(s1.WriteRatioAvg) + s.WriteRatioMax = float64(s1.WriteRatioMax) + return nil +} + +type ExplainQueryStep struct { + // Kind: Machine-readable operation type. + Kind string `json:"kind,omitempty"` + + // Substeps: Human-readable stage descriptions. + Substeps []string `json:"substeps,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExplainQueryStep) MarshalJSON() ([]byte, error) { + type noMethod ExplainQueryStep + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ExternalDataConfiguration struct { + // Autodetect: [Experimental] Try to detect schema and format options + // automatically. Any option specified explicitly will be honored. 
+ Autodetect bool `json:"autodetect,omitempty"` + + // BigtableOptions: [Optional] Additional options if sourceFormat is set + // to BIGTABLE. + BigtableOptions *BigtableOptions `json:"bigtableOptions,omitempty"` + + // Compression: [Optional] The compression type of the data source. + // Possible values include GZIP and NONE. The default value is NONE. + // This setting is ignored for Google Cloud Bigtable, Google Cloud + // Datastore backups and Avro formats. + Compression string `json:"compression,omitempty"` + + // CsvOptions: Additional properties to set if sourceFormat is set to + // CSV. + CsvOptions *CsvOptions `json:"csvOptions,omitempty"` + + // GoogleSheetsOptions: [Optional] Additional options if sourceFormat is + // set to GOOGLE_SHEETS. + GoogleSheetsOptions *GoogleSheetsOptions `json:"googleSheetsOptions,omitempty"` + + // IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow + // extra values that are not represented in the table schema. If true, + // the extra values are ignored. If false, records with extra columns + // are treated as bad records, and if there are too many bad records, an + // invalid error is returned in the job result. The default value is + // false. The sourceFormat property determines what BigQuery treats as + // an extra value: CSV: Trailing columns JSON: Named values that don't + // match any column names Google Cloud Bigtable: This setting is + // ignored. Google Cloud Datastore backups: This setting is ignored. + // Avro: This setting is ignored. + IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"` + + // MaxBadRecords: [Optional] The maximum number of bad records that + // BigQuery can ignore when reading data. If the number of bad records + // exceeds this value, an invalid error is returned in the job result. + // The default value is 0, which requires that all records are valid. + // This setting is ignored for Google Cloud Bigtable, Google Cloud + // Datastore backups and Avro formats. + MaxBadRecords int64 `json:"maxBadRecords,omitempty"` + + // Schema: [Optional] The schema for the data. Schema is required for + // CSV and JSON formats. Schema is disallowed for Google Cloud Bigtable, + // Cloud Datastore backups, and Avro formats. + Schema *TableSchema `json:"schema,omitempty"` + + // SourceFormat: [Required] The data format. For CSV files, specify + // "CSV". For Google sheets, specify "GOOGLE_SHEETS". For + // newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro + // files, specify "AVRO". For Google Cloud Datastore backups, specify + // "DATASTORE_BACKUP". [Experimental] For Google Cloud Bigtable, specify + // "BIGTABLE". Please note that reading from Google Cloud Bigtable is + // experimental and has to be enabled for your project. Please contact + // Google Cloud Support to enable this for your project. + SourceFormat string `json:"sourceFormat,omitempty"` + + // SourceUris: [Required] The fully-qualified URIs that point to your + // data in Google Cloud. For Google Cloud Storage URIs: Each URI can + // contain one '*' wildcard character and it must come after the + // 'bucket' name. Size limits related to load jobs apply to external + // data sources. For Google Cloud Bigtable URIs: Exactly one URI can be + // specified and it has be a fully specified and valid HTTPS URL for a + // Google Cloud Bigtable table. For Google Cloud Datastore backups, + // exactly one URI can be specified, and it must end with + // '.backup_info'. Also, the '*' wildcard character is not allowed. 
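+	// Illustrative Cloud Storage example: "gs://bucket/path/file-*.csv"
+	// (bucket and object names are placeholders).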
+ SourceUris []string `json:"sourceUris,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Autodetect") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Autodetect") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalDataConfiguration) MarshalJSON() ([]byte, error) { + type noMethod ExternalDataConfiguration + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GetQueryResultsResponse struct { + // CacheHit: Whether the query result was fetched from the query cache. + CacheHit bool `json:"cacheHit,omitempty"` + + // Errors: [Output-only] All errors and warnings encountered during the + // running of the job. Errors here do not necessarily mean that the job + // has completed or was unsuccessful. + Errors []*ErrorProto `json:"errors,omitempty"` + + // Etag: A hash of this response. + Etag string `json:"etag,omitempty"` + + // JobComplete: Whether the query has completed or not. If rows or + // totalRows are present, this will always be true. If this is false, + // totalRows will not be available. + JobComplete bool `json:"jobComplete,omitempty"` + + // JobReference: Reference to the BigQuery Job that was created to run + // the query. This field will be present even if the original request + // timed out, in which case GetQueryResults can be used to read the + // results once the query has completed. Since this API only returns the + // first page of results, subsequent pages can be fetched via the same + // mechanism (GetQueryResults). + JobReference *JobReference `json:"jobReference,omitempty"` + + // Kind: The resource type of the response. + Kind string `json:"kind,omitempty"` + + // NumDmlAffectedRows: [Output-only, Experimental] The number of rows + // affected by a DML statement. Present only for DML statements INSERT, + // UPDATE or DELETE. + NumDmlAffectedRows int64 `json:"numDmlAffectedRows,omitempty,string"` + + // PageToken: A token used for paging results. + PageToken string `json:"pageToken,omitempty"` + + // Rows: An object with as many results as can be contained within the + // maximum permitted reply size. To get any additional rows, you can + // call GetQueryResults and specify the jobReference returned above. + // Present only when the query completes successfully. + Rows []*TableRow `json:"rows,omitempty"` + + // Schema: The schema of the results. Present only when the query + // completes successfully. + Schema *TableSchema `json:"schema,omitempty"` + + // TotalBytesProcessed: The total number of bytes processed for this + // query. 
+ TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"` + + // TotalRows: The total number of rows in the complete query result set, + // which can be more than the number of rows in this single page of + // results. Present only when the query completes successfully. + TotalRows uint64 `json:"totalRows,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CacheHit") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CacheHit") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GetQueryResultsResponse) MarshalJSON() ([]byte, error) { + type noMethod GetQueryResultsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GoogleSheetsOptions struct { + // SkipLeadingRows: [Optional] The number of rows at the top of a sheet + // that BigQuery will skip when reading the data. The default value is + // 0. This property is useful if you have header rows that should be + // skipped. When autodetect is on, behavior is the following: * + // skipLeadingRows unspecified - Autodetect tries to detect headers in + // the first row. If they are not detected, the row is read as data. + // Otherwise data is read starting from the second row. * + // skipLeadingRows is 0 - Instructs autodetect that there are no headers + // and data should be read starting from the first row. * + // skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to + // detect headers in row N. If headers are not detected, row N is just + // skipped. Otherwise row N is used to extract column names for the + // detected schema. + SkipLeadingRows int64 `json:"skipLeadingRows,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "SkipLeadingRows") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SkipLeadingRows") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
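+	//
+	// Illustrative sketch only: to serialize skipLeadingRows explicitly
+	// as its zero value rather than omitting it, one could set
+	//   GoogleSheetsOptions{ForceSendFields: []string{"SkipLeadingRows"}}
+	// and, to send it as JSON null instead,
+	//   GoogleSheetsOptions{NullFields: []string{"SkipLeadingRows"}}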
+ NullFields []string `json:"-"` +} + +func (s *GoogleSheetsOptions) MarshalJSON() ([]byte, error) { + type noMethod GoogleSheetsOptions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Job struct { + // Configuration: [Required] Describes the job configuration. + Configuration *JobConfiguration `json:"configuration,omitempty"` + + // Etag: [Output-only] A hash of this resource. + Etag string `json:"etag,omitempty"` + + // Id: [Output-only] Opaque ID field of the job + Id string `json:"id,omitempty"` + + // JobReference: [Optional] Reference describing the unique-per-user + // name of the job. + JobReference *JobReference `json:"jobReference,omitempty"` + + // Kind: [Output-only] The type of the resource. + Kind string `json:"kind,omitempty"` + + // SelfLink: [Output-only] A URL that can be used to access this + // resource again. + SelfLink string `json:"selfLink,omitempty"` + + // Statistics: [Output-only] Information about the job, including + // starting time and ending time of the job. + Statistics *JobStatistics `json:"statistics,omitempty"` + + // Status: [Output-only] The status of this job. Examine this value when + // polling an asynchronous job to see if the job is complete. + Status *JobStatus `json:"status,omitempty"` + + // UserEmail: [Output-only] Email address of the user who ran the job. + UserEmail string `json:"user_email,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Configuration") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Configuration") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Job) MarshalJSON() ([]byte, error) { + type noMethod Job + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobCancelResponse struct { + // Job: The final state of the job. + Job *Job `json:"job,omitempty"` + + // Kind: The resource type of the response. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Job") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Job") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JobCancelResponse) MarshalJSON() ([]byte, error) { + type noMethod JobCancelResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobConfiguration struct { + // Copy: [Pick one] Copies a table. + Copy *JobConfigurationTableCopy `json:"copy,omitempty"` + + // DryRun: [Optional] If set, don't actually run this job. A valid query + // will return a mostly empty response with some processing statistics, + // while an invalid query will return the same error it would if it + // wasn't a dry run. Behavior of non-query jobs is undefined. + DryRun bool `json:"dryRun,omitempty"` + + // Extract: [Pick one] Configures an extract job. + Extract *JobConfigurationExtract `json:"extract,omitempty"` + + // Labels: [Experimental] The labels associated with this job. You can + // use these to organize and group your jobs. Label keys and values can + // be no longer than 63 characters, can only contain lowercase letters, + // numeric characters, underscores and dashes. International characters + // are allowed. Label values are optional. Label keys must start with a + // letter and each label in the list must have a different key. + Labels map[string]string `json:"labels,omitempty"` + + // Load: [Pick one] Configures a load job. + Load *JobConfigurationLoad `json:"load,omitempty"` + + // Query: [Pick one] Configures a query job. + Query *JobConfigurationQuery `json:"query,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Copy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Copy") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JobConfiguration) MarshalJSON() ([]byte, error) { + type noMethod JobConfiguration + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobConfigurationExtract struct { + // Compression: [Optional] The compression type to use for exported + // files. Possible values include GZIP and NONE. The default value is + // NONE. + Compression string `json:"compression,omitempty"` + + // DestinationFormat: [Optional] The exported file format. Possible + // values include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default + // value is CSV. Tables with nested or repeated fields cannot be + // exported as CSV. + DestinationFormat string `json:"destinationFormat,omitempty"` + + // DestinationUri: [Pick one] DEPRECATED: Use destinationUris instead, + // passing only one URI as necessary. 
The fully-qualified Google Cloud + // Storage URI where the extracted table should be written. + DestinationUri string `json:"destinationUri,omitempty"` + + // DestinationUris: [Pick one] A list of fully-qualified Google Cloud + // Storage URIs where the extracted table should be written. + DestinationUris []string `json:"destinationUris,omitempty"` + + // FieldDelimiter: [Optional] Delimiter to use between fields in the + // exported data. Default is ',' + FieldDelimiter string `json:"fieldDelimiter,omitempty"` + + // PrintHeader: [Optional] Whether to print out a header row in the + // results. Default is true. + // + // Default: true + PrintHeader *bool `json:"printHeader,omitempty"` + + // SourceTable: [Required] A reference to the table being exported. + SourceTable *TableReference `json:"sourceTable,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Compression") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Compression") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JobConfigurationExtract) MarshalJSON() ([]byte, error) { + type noMethod JobConfigurationExtract + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobConfigurationLoad struct { + // AllowJaggedRows: [Optional] Accept rows that are missing trailing + // optional columns. The missing values are treated as nulls. If false, + // records with missing trailing columns are treated as bad records, and + // if there are too many bad records, an invalid error is returned in + // the job result. The default value is false. Only applicable to CSV, + // ignored for other formats. + AllowJaggedRows bool `json:"allowJaggedRows,omitempty"` + + // AllowQuotedNewlines: Indicates if BigQuery should allow quoted data + // sections that contain newline characters in a CSV file. The default + // value is false. + AllowQuotedNewlines bool `json:"allowQuotedNewlines,omitempty"` + + // Autodetect: [Experimental] Indicates if we should automatically infer + // the options and schema for CSV and JSON sources. + Autodetect bool `json:"autodetect,omitempty"` + + // CreateDisposition: [Optional] Specifies whether the job is allowed to + // create new tables. The following values are supported: + // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the + // table. CREATE_NEVER: The table must already exist. If it does not, a + // 'notFound' error is returned in the job result. The default value is + // CREATE_IF_NEEDED. Creation, truncation and append actions occur as + // one atomic update upon job completion. + CreateDisposition string `json:"createDisposition,omitempty"` + + // DestinationTable: [Required] The destination table to load the data + // into. 
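+	// A TableReference names the table by project, dataset and table ID,
+	// for example (illustrative IDs only):
+	//
+	//   &TableReference{ProjectId: "my-project", DatasetId: "my_dataset", TableId: "events"}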
+ DestinationTable *TableReference `json:"destinationTable,omitempty"` + + // Encoding: [Optional] The character encoding of the data. The + // supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. + // BigQuery decodes the data after the raw, binary data has been split + // using the values of the quote and fieldDelimiter properties. + Encoding string `json:"encoding,omitempty"` + + // FieldDelimiter: [Optional] The separator for fields in a CSV file. + // The separator can be any ISO-8859-1 single-byte character. To use a + // character in the range 128-255, you must encode the character as + // UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then + // uses the first byte of the encoded string to split the data in its + // raw, binary state. BigQuery also supports the escape sequence "\t" to + // specify a tab separator. The default value is a comma (','). + FieldDelimiter string `json:"fieldDelimiter,omitempty"` + + // IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow + // extra values that are not represented in the table schema. If true, + // the extra values are ignored. If false, records with extra columns + // are treated as bad records, and if there are too many bad records, an + // invalid error is returned in the job result. The default value is + // false. The sourceFormat property determines what BigQuery treats as + // an extra value: CSV: Trailing columns JSON: Named values that don't + // match any column names + IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"` + + // MaxBadRecords: [Optional] The maximum number of bad records that + // BigQuery can ignore when running the job. If the number of bad + // records exceeds this value, an invalid error is returned in the job + // result. The default value is 0, which requires that all records are + // valid. + MaxBadRecords int64 `json:"maxBadRecords,omitempty"` + + // NullMarker: [Optional] Specifies a string that represents a null + // value in a CSV file. For example, if you specify "\N", BigQuery + // interprets "\N" as a null value when loading a CSV file. The default + // value is the empty string. If you set this property to a custom + // value, BigQuery still interprets the empty string as a null value for + // all data types except for STRING and BYTE. For STRING and BYTE + // columns, BigQuery interprets the empty string as an empty value. + NullMarker string `json:"nullMarker,omitempty"` + + // ProjectionFields: [Experimental] If sourceFormat is set to + // "DATASTORE_BACKUP", indicates which entity properties to load into + // BigQuery from a Cloud Datastore backup. Property names are case + // sensitive and must be top-level properties. If no properties are + // specified, BigQuery loads all properties. If any named property isn't + // found in the Cloud Datastore backup, an invalid error is returned in + // the job result. + ProjectionFields []string `json:"projectionFields,omitempty"` + + // Quote: [Optional] The value that is used to quote data sections in a + // CSV file. BigQuery converts the string to ISO-8859-1 encoding, and + // then uses the first byte of the encoded string to split the data in + // its raw, binary state. The default value is a double-quote ('"'). If + // your data does not contain quoted sections, set the property value to + // an empty string. If your data contains quoted newline characters, you + // must also set the allowQuotedNewlines property to true. 
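+	// As an illustrative example, data quoted with single quotes would be
+	// loaded by pointing Quote at "'":
+	//
+	//   q := "'"
+	//   cfg.Quote = &q // cfg is a hypothetical *JobConfigurationLoad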
+	//
+	// Default: "
+	Quote *string `json:"quote,omitempty"`
+
+	// Schema: [Optional] The schema for the destination table. The schema
+	// can be omitted if the destination table already exists, or if you're
+	// loading data from Google Cloud Datastore.
+	Schema *TableSchema `json:"schema,omitempty"`
+
+	// SchemaInline: [Deprecated] The inline schema. For CSV schemas,
+	// specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING,
+	// bar:INTEGER, baz:FLOAT".
+	SchemaInline string `json:"schemaInline,omitempty"`
+
+	// SchemaInlineFormat: [Deprecated] The format of the schemaInline
+	// property.
+	SchemaInlineFormat string `json:"schemaInlineFormat,omitempty"`
+
+	// SchemaUpdateOptions: [Experimental] Allows the schema of the
+	// destination table to be updated as a side effect of the load job.
+	// Schema update options are supported in two cases: when
+	// writeDisposition is WRITE_APPEND; when writeDisposition is
+	// WRITE_TRUNCATE and the destination table is a partition of a table,
+	// specified by partition decorators. For normal tables, WRITE_TRUNCATE
+	// will always overwrite the schema. One or more of the following values
+	// are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to
+	// the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field
+	// in the original schema to nullable.
+	SchemaUpdateOptions []string `json:"schemaUpdateOptions,omitempty"`
+
+	// SkipLeadingRows: [Optional] The number of rows at the top of a CSV
+	// file that BigQuery will skip when loading the data. The default value
+	// is 0. This property is useful if you have header rows in the file
+	// that should be skipped.
+	SkipLeadingRows int64 `json:"skipLeadingRows,omitempty"`
+
+	// SourceFormat: [Optional] The format of the data files. For CSV files,
+	// specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For
+	// newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro,
+	// specify "AVRO". The default value is CSV.
+	SourceFormat string `json:"sourceFormat,omitempty"`
+
+	// SourceUris: [Required] The fully-qualified URIs that point to your
+	// data in Google Cloud Storage. Each URI can contain one '*' wildcard
+	// character and it must come after the 'bucket' name.
+	SourceUris []string `json:"sourceUris,omitempty"`
+
+	// WriteDisposition: [Optional] Specifies the action that occurs if the
+	// destination table already exists. The following values are supported:
+	// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
+	// table data. WRITE_APPEND: If the table already exists, BigQuery
+	// appends the data to the table. WRITE_EMPTY: If the table already
+	// exists and contains data, a 'duplicate' error is returned in the job
+	// result. The default value is WRITE_APPEND. Each action is atomic and
+	// only occurs if BigQuery is able to complete the job successfully.
+	// Creation, truncation and append actions occur as one atomic update
+	// upon job completion.
+	WriteDisposition string `json:"writeDisposition,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "AllowJaggedRows") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
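+	// For instance (illustrative), listing "SkipLeadingRows" here forces
+	// "skipLeadingRows": 0 into the request body even though zero values
+	// are normally dropped by the omitempty tags.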
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowJaggedRows") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *JobConfigurationLoad) MarshalJSON() ([]byte, error) { + type noMethod JobConfigurationLoad + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobConfigurationQuery struct { + // AllowLargeResults: If true, allows the query to produce arbitrarily + // large result tables at a slight cost in performance. Requires + // destinationTable to be set. + AllowLargeResults bool `json:"allowLargeResults,omitempty"` + + // CreateDisposition: [Optional] Specifies whether the job is allowed to + // create new tables. The following values are supported: + // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the + // table. CREATE_NEVER: The table must already exist. If it does not, a + // 'notFound' error is returned in the job result. The default value is + // CREATE_IF_NEEDED. Creation, truncation and append actions occur as + // one atomic update upon job completion. + CreateDisposition string `json:"createDisposition,omitempty"` + + // DefaultDataset: [Optional] Specifies the default dataset to use for + // unqualified table names in the query. + DefaultDataset *DatasetReference `json:"defaultDataset,omitempty"` + + // DestinationTable: [Optional] Describes the table where the query + // results should be stored. If not present, a new table will be created + // to store the results. + DestinationTable *TableReference `json:"destinationTable,omitempty"` + + // FlattenResults: [Optional] Flattens all nested and repeated fields in + // the query results. The default value is true. allowLargeResults must + // be true if this is set to false. + // + // Default: true + FlattenResults *bool `json:"flattenResults,omitempty"` + + // MaximumBillingTier: [Optional] Limits the billing tier for this job. + // Queries that have resource usage beyond this tier will fail (without + // incurring a charge). If unspecified, this will be set to your project + // default. + // + // Default: 1 + MaximumBillingTier *int64 `json:"maximumBillingTier,omitempty"` + + // MaximumBytesBilled: [Optional] Limits the bytes billed for this job. + // Queries that will have bytes billed beyond this limit will fail + // (without incurring a charge). If unspecified, this will be set to + // your project default. + MaximumBytesBilled int64 `json:"maximumBytesBilled,omitempty,string"` + + // ParameterMode: [Experimental] Standard SQL only. Set to POSITIONAL to + // use positional (?) query parameters or to NAMED to use named + // (@myparam) query parameters in this query. + ParameterMode string `json:"parameterMode,omitempty"` + + // PreserveNulls: [Deprecated] This property is deprecated. + PreserveNulls bool `json:"preserveNulls,omitempty"` + + // Priority: [Optional] Specifies a priority for the query. Possible + // values include INTERACTIVE and BATCH. The default value is + // INTERACTIVE. + Priority string `json:"priority,omitempty"` + + // Query: [Required] BigQuery SQL query to execute. 
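+	// Example (illustrative; the dataset and table names are placeholders):
+	//
+	//   SELECT word, COUNT(*) AS n FROM `my_dataset.my_table` GROUP BY word
+	//
+	// Legacy SQL instead uses [project:dataset.table] bracket notation;
+	// see UseLegacySql below.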
+ Query string `json:"query,omitempty"` + + // QueryParameters: Query parameters for standard SQL queries. + QueryParameters []*QueryParameter `json:"queryParameters,omitempty"` + + // SchemaUpdateOptions: [Experimental] Allows the schema of the + // destination table to be updated as a side effect of the query job. + // Schema update options are supported in two cases: when + // writeDisposition is WRITE_APPEND; when writeDisposition is + // WRITE_TRUNCATE and the destination table is a partition of a table, + // specified by partition decorators. For normal tables, WRITE_TRUNCATE + // will always overwrite the schema. One or more of the following values + // are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to + // the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field + // in the original schema to nullable. + SchemaUpdateOptions []string `json:"schemaUpdateOptions,omitempty"` + + // TableDefinitions: [Optional] If querying an external data source + // outside of BigQuery, describes the data format, location and other + // properties of the data source. By defining these properties, the data + // source can then be queried as if it were a standard BigQuery table. + TableDefinitions map[string]ExternalDataConfiguration `json:"tableDefinitions,omitempty"` + + // UseLegacySql: Specifies whether to use BigQuery's legacy SQL dialect + // for this query. The default value is true. If set to false, the query + // will use BigQuery's standard SQL: + // https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is + // set to false, the values of allowLargeResults and flattenResults are + // ignored; query will be run as if allowLargeResults is true and + // flattenResults is false. + UseLegacySql bool `json:"useLegacySql,omitempty"` + + // UseQueryCache: [Optional] Whether to look for the result in the query + // cache. The query cache is a best-effort cache that will be flushed + // whenever tables in the query are modified. Moreover, the query cache + // is only available when a query does not have a destination table + // specified. The default value is true. + // + // Default: true + UseQueryCache *bool `json:"useQueryCache,omitempty"` + + // UserDefinedFunctionResources: [Experimental] Describes user-defined + // function resources used in the query. + UserDefinedFunctionResources []*UserDefinedFunctionResource `json:"userDefinedFunctionResources,omitempty"` + + // WriteDisposition: [Optional] Specifies the action that occurs if the + // destination table already exists. The following values are supported: + // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the + // table data. WRITE_APPEND: If the table already exists, BigQuery + // appends the data to the table. WRITE_EMPTY: If the table already + // exists and contains data, a 'duplicate' error is returned in the job + // result. The default value is WRITE_EMPTY. Each action is atomic and + // only occurs if BigQuery is able to complete the job successfully. + // Creation, truncation and append actions occur as one atomic update + // upon job completion. + WriteDisposition string `json:"writeDisposition,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AllowLargeResults") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowLargeResults") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *JobConfigurationQuery) MarshalJSON() ([]byte, error) { + type noMethod JobConfigurationQuery + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobConfigurationTableCopy struct { + // CreateDisposition: [Optional] Specifies whether the job is allowed to + // create new tables. The following values are supported: + // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the + // table. CREATE_NEVER: The table must already exist. If it does not, a + // 'notFound' error is returned in the job result. The default value is + // CREATE_IF_NEEDED. Creation, truncation and append actions occur as + // one atomic update upon job completion. + CreateDisposition string `json:"createDisposition,omitempty"` + + // DestinationTable: [Required] The destination table + DestinationTable *TableReference `json:"destinationTable,omitempty"` + + // SourceTable: [Pick one] Source table to copy. + SourceTable *TableReference `json:"sourceTable,omitempty"` + + // SourceTables: [Pick one] Source tables to copy. + SourceTables []*TableReference `json:"sourceTables,omitempty"` + + // WriteDisposition: [Optional] Specifies the action that occurs if the + // destination table already exists. The following values are supported: + // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the + // table data. WRITE_APPEND: If the table already exists, BigQuery + // appends the data to the table. WRITE_EMPTY: If the table already + // exists and contains data, a 'duplicate' error is returned in the job + // result. The default value is WRITE_EMPTY. Each action is atomic and + // only occurs if BigQuery is able to complete the job successfully. + // Creation, truncation and append actions occur as one atomic update + // upon job completion. + WriteDisposition string `json:"writeDisposition,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateDisposition") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateDisposition") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *JobConfigurationTableCopy) MarshalJSON() ([]byte, error) { + type noMethod JobConfigurationTableCopy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobList struct { + // Etag: A hash of this page of results. + Etag string `json:"etag,omitempty"` + + // Jobs: List of jobs that were requested. + Jobs []*JobListJobs `json:"jobs,omitempty"` + + // Kind: The resource type of the response. + Kind string `json:"kind,omitempty"` + + // NextPageToken: A token to request the next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JobList) MarshalJSON() ([]byte, error) { + type noMethod JobList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobListJobs struct { + // Configuration: [Full-projection-only] Specifies the job + // configuration. + Configuration *JobConfiguration `json:"configuration,omitempty"` + + // ErrorResult: A result object that will be present only if the job has + // failed. + ErrorResult *ErrorProto `json:"errorResult,omitempty"` + + // Id: Unique opaque ID of the job. + Id string `json:"id,omitempty"` + + // JobReference: Job reference uniquely identifying the job. + JobReference *JobReference `json:"jobReference,omitempty"` + + // Kind: The resource type. + Kind string `json:"kind,omitempty"` + + // State: Running state of the job. When the state is DONE, errorResult + // can be checked to determine whether the job succeeded or failed. + State string `json:"state,omitempty"` + + // Statistics: [Output-only] Information about the job, including + // starting time and ending time of the job. + Statistics *JobStatistics `json:"statistics,omitempty"` + + // Status: [Full-projection-only] Describes the state of the job. + Status *JobStatus `json:"status,omitempty"` + + // UserEmail: [Full-projection-only] Email address of the user who ran + // the job. + UserEmail string `json:"user_email,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Configuration") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Configuration") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JobListJobs) MarshalJSON() ([]byte, error) { + type noMethod JobListJobs + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobReference struct { + // JobId: [Required] The ID of the job. The ID must contain only letters + // (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The + // maximum length is 1,024 characters. + JobId string `json:"jobId,omitempty"` + + // ProjectId: [Required] The ID of the project containing this job. + ProjectId string `json:"projectId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "JobId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "JobId") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JobReference) MarshalJSON() ([]byte, error) { + type noMethod JobReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobStatistics struct { + // CreationTime: [Output-only] Creation time of this job, in + // milliseconds since the epoch. This field will be present on all jobs. + CreationTime int64 `json:"creationTime,omitempty,string"` + + // EndTime: [Output-only] End time of this job, in milliseconds since + // the epoch. This field will be present whenever a job is in the DONE + // state. + EndTime int64 `json:"endTime,omitempty,string"` + + // Extract: [Output-only] Statistics for an extract job. + Extract *JobStatistics4 `json:"extract,omitempty"` + + // Load: [Output-only] Statistics for a load job. + Load *JobStatistics3 `json:"load,omitempty"` + + // Query: [Output-only] Statistics for a query job. + Query *JobStatistics2 `json:"query,omitempty"` + + // StartTime: [Output-only] Start time of this job, in milliseconds + // since the epoch. This field will be present when the job transitions + // from the PENDING state to either RUNNING or DONE. + StartTime int64 `json:"startTime,omitempty,string"` + + // TotalBytesProcessed: [Output-only] [Deprecated] Use the bytes + // processed in the query statistics instead. + TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "CreationTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JobStatistics) MarshalJSON() ([]byte, error) { + type noMethod JobStatistics + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobStatistics2 struct { + // BillingTier: [Output-only] Billing tier for the job. + BillingTier int64 `json:"billingTier,omitempty"` + + // CacheHit: [Output-only] Whether the query result was fetched from the + // query cache. + CacheHit bool `json:"cacheHit,omitempty"` + + // NumDmlAffectedRows: [Output-only, Experimental] The number of rows + // affected by a DML statement. Present only for DML statements INSERT, + // UPDATE or DELETE. + NumDmlAffectedRows int64 `json:"numDmlAffectedRows,omitempty,string"` + + // QueryPlan: [Output-only, Experimental] Describes execution plan for + // the query. + QueryPlan []*ExplainQueryStage `json:"queryPlan,omitempty"` + + // ReferencedTables: [Output-only, Experimental] Referenced tables for + // the job. Queries that reference more than 50 tables will not have a + // complete list. + ReferencedTables []*TableReference `json:"referencedTables,omitempty"` + + // Schema: [Output-only, Experimental] The schema of the results. + // Present only for successful dry run of non-legacy SQL queries. + Schema *TableSchema `json:"schema,omitempty"` + + // StatementType: [Output-only, Experimental] The type of query + // statement, if valid. + StatementType string `json:"statementType,omitempty"` + + // TotalBytesBilled: [Output-only] Total bytes billed for the job. + TotalBytesBilled int64 `json:"totalBytesBilled,omitempty,string"` + + // TotalBytesProcessed: [Output-only] Total bytes processed for the job. + TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"` + + // UndeclaredQueryParameters: [Output-only, Experimental] Standard SQL + // only: list of undeclared query parameters detected during a dry run + // validation. + UndeclaredQueryParameters []*QueryParameter `json:"undeclaredQueryParameters,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BillingTier") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BillingTier") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JobStatistics2) MarshalJSON() ([]byte, error) { + type noMethod JobStatistics2 + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobStatistics3 struct { + // InputFileBytes: [Output-only] Number of bytes of source data in a + // load job. + InputFileBytes int64 `json:"inputFileBytes,omitempty,string"` + + // InputFiles: [Output-only] Number of source files in a load job. + InputFiles int64 `json:"inputFiles,omitempty,string"` + + // OutputBytes: [Output-only] Size of the loaded data in bytes. Note + // that while a load job is in the running state, this value may change. + OutputBytes int64 `json:"outputBytes,omitempty,string"` + + // OutputRows: [Output-only] Number of rows imported in a load job. Note + // that while an import job is in the running state, this value may + // change. + OutputRows int64 `json:"outputRows,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "InputFileBytes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InputFileBytes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *JobStatistics3) MarshalJSON() ([]byte, error) { + type noMethod JobStatistics3 + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobStatistics4 struct { + // DestinationUriFileCounts: [Output-only] Number of files per + // destination URI or URI pattern specified in the extract + // configuration. These values will be in the same order as the URIs + // specified in the 'destinationUris' field. + DestinationUriFileCounts googleapi.Int64s `json:"destinationUriFileCounts,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DestinationUriFileCounts") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DestinationUriFileCounts") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *JobStatistics4) MarshalJSON() ([]byte, error) { + type noMethod JobStatistics4 + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JobStatus struct { + // ErrorResult: [Output-only] Final error result of the job. If present, + // indicates that the job has completed and was unsuccessful. + ErrorResult *ErrorProto `json:"errorResult,omitempty"` + + // Errors: [Output-only] All errors encountered during the running of + // the job. Errors here do not necessarily mean that the job has + // completed or was unsuccessful. + Errors []*ErrorProto `json:"errors,omitempty"` + + // State: [Output-only] Running state of the job. + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ErrorResult") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ErrorResult") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JobStatus) MarshalJSON() ([]byte, error) { + type noMethod JobStatus + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type JsonValue interface{} + +type ProjectList struct { + // Etag: A hash of the page of results + Etag string `json:"etag,omitempty"` + + // Kind: The type of list. + Kind string `json:"kind,omitempty"` + + // NextPageToken: A token to request the next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Projects: Projects to which you have at least READ access. + Projects []*ProjectListProjects `json:"projects,omitempty"` + + // TotalItems: The total number of projects in the list. + TotalItems int64 `json:"totalItems,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ProjectList) MarshalJSON() ([]byte, error) { + type noMethod ProjectList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ProjectListProjects struct { + // FriendlyName: A descriptive name for this project. + FriendlyName string `json:"friendlyName,omitempty"` + + // Id: An opaque ID of this project. + Id string `json:"id,omitempty"` + + // Kind: The resource type. + Kind string `json:"kind,omitempty"` + + // NumericId: The numeric ID of this project. + NumericId uint64 `json:"numericId,omitempty,string"` + + // ProjectReference: A unique reference to this project. + ProjectReference *ProjectReference `json:"projectReference,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FriendlyName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FriendlyName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ProjectListProjects) MarshalJSON() ([]byte, error) { + type noMethod ProjectListProjects + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ProjectReference struct { + // ProjectId: [Required] ID of the project. Can be either the numeric ID + // or the assigned ID of the project. + ProjectId string `json:"projectId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProjectId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProjectId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ProjectReference) MarshalJSON() ([]byte, error) { + type noMethod ProjectReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type QueryParameter struct { + // Name: [Optional] If unset, this is a positional parameter. Otherwise, + // should be unique within a query. + Name string `json:"name,omitempty"` + + // ParameterType: [Required] The type of this parameter. + ParameterType *QueryParameterType `json:"parameterType,omitempty"` + + // ParameterValue: [Required] The value of this parameter. 
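+	// An illustrative named INT64 parameter could be assembled as:
+	//
+	//   &QueryParameter{
+	//           Name:           "min_count",
+	//           ParameterType:  &QueryParameterType{Type: "INT64"},
+	//           ParameterValue: &QueryParameterValue{Value: "10"},
+	//   }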
+ ParameterValue *QueryParameterValue `json:"parameterValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *QueryParameter) MarshalJSON() ([]byte, error) { + type noMethod QueryParameter + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type QueryParameterType struct { + // ArrayType: [Optional] The type of the array's elements, if this is an + // array. + ArrayType *QueryParameterType `json:"arrayType,omitempty"` + + // StructTypes: [Optional] The types of the fields of this struct, in + // order, if this is a struct. + StructTypes []*QueryParameterTypeStructTypes `json:"structTypes,omitempty"` + + // Type: [Required] The top level type of this field. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ArrayType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ArrayType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *QueryParameterType) MarshalJSON() ([]byte, error) { + type noMethod QueryParameterType + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type QueryParameterTypeStructTypes struct { + // Description: [Optional] Human-oriented description of the field. + Description string `json:"description,omitempty"` + + // Name: [Optional] The name of this field. + Name string `json:"name,omitempty"` + + // Type: [Required] The type of this field. + Type *QueryParameterType `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *QueryParameterTypeStructTypes) MarshalJSON() ([]byte, error) { + type noMethod QueryParameterTypeStructTypes + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type QueryParameterValue struct { + // ArrayValues: [Optional] The array values, if this is an array type. + ArrayValues []*QueryParameterValue `json:"arrayValues,omitempty"` + + // StructValues: [Optional] The struct field values, in order of the + // struct type's declaration. + StructValues map[string]QueryParameterValue `json:"structValues,omitempty"` + + // Value: [Optional] The value of this value, if a simple scalar type. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ArrayValues") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ArrayValues") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *QueryParameterValue) MarshalJSON() ([]byte, error) { + type noMethod QueryParameterValue + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type QueryRequest struct { + // DefaultDataset: [Optional] Specifies the default datasetId and + // projectId to assume for any unqualified table names in the query. If + // not set, all table names in the query string must be qualified in the + // format 'datasetId.tableId'. + DefaultDataset *DatasetReference `json:"defaultDataset,omitempty"` + + // DryRun: [Optional] If set to true, BigQuery doesn't run the job. + // Instead, if the query is valid, BigQuery returns statistics about the + // job such as how many bytes would be processed. If the query is + // invalid, an error returns. The default value is false. + DryRun bool `json:"dryRun,omitempty"` + + // Kind: The resource type of the request. + Kind string `json:"kind,omitempty"` + + // MaxResults: [Optional] The maximum number of rows of data to return + // per page of results. Setting this flag to a small value such as 1000 + // and then paging through results might improve reliability when the + // query result set is large. In addition to this limit, responses are + // also limited to 10 MB. By default, there is no maximum row count, and + // only the byte limit applies. 
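+	// For example (illustrative), MaxResults: 1000 caps each page at 1000
+	// rows; the PageToken returned in the QueryResponse can then be passed
+	// to GetQueryResults to fetch subsequent pages.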
+ MaxResults int64 `json:"maxResults,omitempty"` + + // ParameterMode: [Experimental] Standard SQL only. Set to POSITIONAL to + // use positional (?) query parameters or to NAMED to use named + // (@myparam) query parameters in this query. + ParameterMode string `json:"parameterMode,omitempty"` + + // PreserveNulls: [Deprecated] This property is deprecated. + PreserveNulls bool `json:"preserveNulls,omitempty"` + + // Query: [Required] A query string, following the BigQuery query + // syntax, of the query to execute. Example: "SELECT count(f1) FROM + // [myProjectId:myDatasetId.myTableId]". + Query string `json:"query,omitempty"` + + // QueryParameters: [Experimental] Query parameters for Standard SQL + // queries. + QueryParameters []*QueryParameter `json:"queryParameters,omitempty"` + + // TimeoutMs: [Optional] How long to wait for the query to complete, in + // milliseconds, before the request times out and returns. Note that + // this is only a timeout for the request, not the query. If the query + // takes longer to run than the timeout value, the call returns without + // any results and with the 'jobComplete' flag set to false. You can + // call GetQueryResults() to wait for the query to complete and read the + // results. The default value is 10000 milliseconds (10 seconds). + TimeoutMs int64 `json:"timeoutMs,omitempty"` + + // UseLegacySql: Specifies whether to use BigQuery's legacy SQL dialect + // for this query. The default value is true. If set to false, the query + // will use BigQuery's standard SQL: + // https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is + // set to false, the values of allowLargeResults and flattenResults are + // ignored; query will be run as if allowLargeResults is true and + // flattenResults is false. + // + // Default: true + UseLegacySql *bool `json:"useLegacySql,omitempty"` + + // UseQueryCache: [Optional] Whether to look for the result in the query + // cache. The query cache is a best-effort cache that will be flushed + // whenever tables in the query are modified. The default value is true. + // + // Default: true + UseQueryCache *bool `json:"useQueryCache,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DefaultDataset") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DefaultDataset") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *QueryRequest) MarshalJSON() ([]byte, error) { + type noMethod QueryRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type QueryResponse struct { + // CacheHit: Whether the query result was fetched from the query cache. + CacheHit bool `json:"cacheHit,omitempty"` + + // Errors: [Output-only] All errors and warnings encountered during the + // running of the job. 
Errors here do not necessarily mean that the job + // has completed or was unsuccessful. + Errors []*ErrorProto `json:"errors,omitempty"` + + // JobComplete: Whether the query has completed or not. If rows or + // totalRows are present, this will always be true. If this is false, + // totalRows will not be available. + JobComplete bool `json:"jobComplete,omitempty"` + + // JobReference: Reference to the Job that was created to run the query. + // This field will be present even if the original request timed out, in + // which case GetQueryResults can be used to read the results once the + // query has completed. Since this API only returns the first page of + // results, subsequent pages can be fetched via the same mechanism + // (GetQueryResults). + JobReference *JobReference `json:"jobReference,omitempty"` + + // Kind: The resource type. + Kind string `json:"kind,omitempty"` + + // NumDmlAffectedRows: [Output-only, Experimental] The number of rows + // affected by a DML statement. Present only for DML statements INSERT, + // UPDATE or DELETE. + NumDmlAffectedRows int64 `json:"numDmlAffectedRows,omitempty,string"` + + // PageToken: A token used for paging results. + PageToken string `json:"pageToken,omitempty"` + + // Rows: An object with as many results as can be contained within the + // maximum permitted reply size. To get any additional rows, you can + // call GetQueryResults and specify the jobReference returned above. + Rows []*TableRow `json:"rows,omitempty"` + + // Schema: The schema of the results. Present only when the query + // completes successfully. + Schema *TableSchema `json:"schema,omitempty"` + + // TotalBytesProcessed: The total number of bytes processed for this + // query. If this query was a dry run, this is the number of bytes that + // would be processed if the query were run. + TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"` + + // TotalRows: The total number of rows in the complete query result set, + // which can be more than the number of rows in this single page of + // results. + TotalRows uint64 `json:"totalRows,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CacheHit") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CacheHit") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *QueryResponse) MarshalJSON() ([]byte, error) { + type noMethod QueryResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Streamingbuffer struct { + // EstimatedBytes: [Output-only] A lower-bound estimate of the number of + // bytes currently in the streaming buffer. 
+ EstimatedBytes uint64 `json:"estimatedBytes,omitempty,string"` + + // EstimatedRows: [Output-only] A lower-bound estimate of the number of + // rows currently in the streaming buffer. + EstimatedRows uint64 `json:"estimatedRows,omitempty,string"` + + // OldestEntryTime: [Output-only] Contains the timestamp of the oldest + // entry in the streaming buffer, in milliseconds since the epoch, if + // the streaming buffer is available. + OldestEntryTime uint64 `json:"oldestEntryTime,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "EstimatedBytes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EstimatedBytes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Streamingbuffer) MarshalJSON() ([]byte, error) { + type noMethod Streamingbuffer + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Table struct { + // CreationTime: [Output-only] The time when this table was created, in + // milliseconds since the epoch. + CreationTime int64 `json:"creationTime,omitempty,string"` + + // Description: [Optional] A user-friendly description of this table. + Description string `json:"description,omitempty"` + + // Etag: [Output-only] A hash of this resource. + Etag string `json:"etag,omitempty"` + + // ExpirationTime: [Optional] The time when this table expires, in + // milliseconds since the epoch. If not present, the table will persist + // indefinitely. Expired tables will be deleted and their storage + // reclaimed. + ExpirationTime int64 `json:"expirationTime,omitempty,string"` + + // ExternalDataConfiguration: [Optional] Describes the data format, + // location, and other properties of a table stored outside of BigQuery. + // By defining these properties, the data source can then be queried as + // if it were a standard BigQuery table. + ExternalDataConfiguration *ExternalDataConfiguration `json:"externalDataConfiguration,omitempty"` + + // FriendlyName: [Optional] A descriptive name for this table. + FriendlyName string `json:"friendlyName,omitempty"` + + // Id: [Output-only] An opaque ID uniquely identifying the table. + Id string `json:"id,omitempty"` + + // Kind: [Output-only] The type of the resource. + Kind string `json:"kind,omitempty"` + + // Labels: [Experimental] The labels associated with this table. You can + // use these to organize and group your tables. Label keys and values + // can be no longer than 63 characters, can only contain lowercase + // letters, numeric characters, underscores and dashes. International + // characters are allowed. Label values are optional. Label keys must + // start with a letter and each label in the list must have a different + // key. 
+ Labels map[string]string `json:"labels,omitempty"` + + // LastModifiedTime: [Output-only] The time when this table was last + // modified, in milliseconds since the epoch. + LastModifiedTime uint64 `json:"lastModifiedTime,omitempty,string"` + + // Location: [Output-only] The geographic location where the table + // resides. This value is inherited from the dataset. + Location string `json:"location,omitempty"` + + // NumBytes: [Output-only] The size of this table in bytes, excluding + // any data in the streaming buffer. + NumBytes int64 `json:"numBytes,omitempty,string"` + + // NumLongTermBytes: [Output-only] The number of bytes in the table that + // are considered "long-term storage". + NumLongTermBytes int64 `json:"numLongTermBytes,omitempty,string"` + + // NumRows: [Output-only] The number of rows of data in this table, + // excluding any data in the streaming buffer. + NumRows uint64 `json:"numRows,omitempty,string"` + + // Schema: [Optional] Describes the schema of this table. + Schema *TableSchema `json:"schema,omitempty"` + + // SelfLink: [Output-only] A URL that can be used to access this + // resource again. + SelfLink string `json:"selfLink,omitempty"` + + // StreamingBuffer: [Output-only] Contains information regarding this + // table's streaming buffer, if one is present. This field will be + // absent if the table is not being streamed to or if there is no data + // in the streaming buffer. + StreamingBuffer *Streamingbuffer `json:"streamingBuffer,omitempty"` + + // TableReference: [Required] Reference describing the ID of this table. + TableReference *TableReference `json:"tableReference,omitempty"` + + // TimePartitioning: [Experimental] If specified, configures time-based + // partitioning for this table. + TimePartitioning *TimePartitioning `json:"timePartitioning,omitempty"` + + // Type: [Output-only] Describes the table type. The following values + // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table + // defined by a SQL query. EXTERNAL: A table that references data stored + // in an external storage system, such as Google Cloud Storage. The + // default value is TABLE. + Type string `json:"type,omitempty"` + + // View: [Optional] The view definition. + View *ViewDefinition `json:"view,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"`
+}
+
+func (s *Table) MarshalJSON() ([]byte, error) {
+ type noMethod Table
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type TableCell struct {
+ V interface{} `json:"v,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "V") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "V") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *TableCell) MarshalJSON() ([]byte, error) {
+ type noMethod TableCell
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type TableDataInsertAllRequest struct {
+ // IgnoreUnknownValues: [Optional] Accept rows that contain values that
+ // do not match the schema. The unknown values are ignored. Default is
+ // false, which treats unknown values as errors.
+ IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"`
+
+ // Kind: The resource type of the request.
+ Kind string `json:"kind,omitempty"`
+
+ // Rows: The rows to insert.
+ Rows []*TableDataInsertAllRequestRows `json:"rows,omitempty"`
+
+ // SkipInvalidRows: [Optional] Insert all valid rows of a request, even
+ // if invalid rows exist. The default value is false, which causes the
+ // entire request to fail if any invalid rows exist.
+ SkipInvalidRows bool `json:"skipInvalidRows,omitempty"`
+
+ // TemplateSuffix: [Experimental] If specified, treats the destination
+ // table as a base template, and inserts the rows into an instance table
+ // named "{destination}{templateSuffix}". BigQuery will manage creation
+ // of the instance table, using the schema of the base template table.
+ // See
+ // https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables for considerations when working with template
+ // tables.
+ TemplateSuffix string `json:"templateSuffix,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "IgnoreUnknownValues")
+ // to unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "IgnoreUnknownValues") to
+ // include in API requests with the JSON null value. By default, fields
+ // with empty values are omitted from API requests. However, any field
+ // with an empty value appearing in NullFields will be sent to the
+ // server as null. It is an error if a field in this list has a
+ // non-empty value. This may be used to include null fields in Patch
+ // requests. 
+ NullFields []string `json:"-"` +} + +func (s *TableDataInsertAllRequest) MarshalJSON() ([]byte, error) { + type noMethod TableDataInsertAllRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TableDataInsertAllRequestRows struct { + // InsertId: [Optional] A unique ID for each row. BigQuery uses this + // property to detect duplicate insertion requests on a best-effort + // basis. + InsertId string `json:"insertId,omitempty"` + + // Json: [Required] A JSON object that contains a row of data. The + // object's properties and values must match the destination table's + // schema. + Json map[string]JsonValue `json:"json,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InsertId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InsertId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TableDataInsertAllRequestRows) MarshalJSON() ([]byte, error) { + type noMethod TableDataInsertAllRequestRows + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TableDataInsertAllResponse struct { + // InsertErrors: An array of errors for rows that were not inserted. + InsertErrors []*TableDataInsertAllResponseInsertErrors `json:"insertErrors,omitempty"` + + // Kind: The resource type of the response. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "InsertErrors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InsertErrors") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TableDataInsertAllResponse) MarshalJSON() ([]byte, error) { + type noMethod TableDataInsertAllResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TableDataInsertAllResponseInsertErrors struct { + // Errors: Error information for the row indicated by the index + // property. 
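+ // Illustration only, not generated code: a sketch of a streaming
+ // insert using the request/response types above. It assumes an
+ // authenticated *Service named "svc" and the generated
+ // Tabledata.InsertAll method defined elsewhere in this file.
+ //
+ //    resp, err := svc.Tabledata.InsertAll("my-project", "my_dataset", "my_table",
+ //        &TableDataInsertAllRequest{
+ //            SkipInvalidRows: true,
+ //            Rows: []*TableDataInsertAllRequestRows{
+ //                {InsertId: "row-1", Json: map[string]JsonValue{"name": "a"}},
+ //            },
+ //        }).Do()
+ //    if err == nil {
+ //        for _, e := range resp.InsertErrors {
+ //            fmt.Println(e.Index, e.Errors)
+ //        }
+ //    }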
+ Errors []*ErrorProto `json:"errors,omitempty"`
+
+ // Index: The index of the row that the error applies to.
+ Index int64 `json:"index,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Errors") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "Errors") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *TableDataInsertAllResponseInsertErrors) MarshalJSON() ([]byte, error) {
+ type noMethod TableDataInsertAllResponseInsertErrors
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type TableDataList struct {
+ // Etag: A hash of this page of results.
+ Etag string `json:"etag,omitempty"`
+
+ // Kind: The resource type of the response.
+ Kind string `json:"kind,omitempty"`
+
+ // PageToken: A token used for paging results. Providing this token
+ // instead of the startIndex parameter can help you retrieve stable
+ // results when an underlying table is changing.
+ PageToken string `json:"pageToken,omitempty"`
+
+ // Rows: Rows of results.
+ Rows []*TableRow `json:"rows,omitempty"`
+
+ // TotalRows: The total number of rows in the complete table.
+ TotalRows int64 `json:"totalRows,omitempty,string"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Etag") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "Etag") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *TableDataList) MarshalJSON() ([]byte, error) {
+ type noMethod TableDataList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type TableFieldSchema struct {
+ // Description: [Optional] The field description. The maximum length is
+ // 16K characters.
+ Description string `json:"description,omitempty"`
+
+ // Fields: [Optional] Describes the nested schema fields if the type
+ // property is set to RECORD.
+ Fields []*TableFieldSchema `json:"fields,omitempty"`
+
+ // Mode: [Optional] The field mode. 
Possible values include NULLABLE, + // REQUIRED and REPEATED. The default value is NULLABLE. + Mode string `json:"mode,omitempty"` + + // Name: [Required] The field name. The name must contain only letters + // (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a + // letter or underscore. The maximum length is 128 characters. + Name string `json:"name,omitempty"` + + // Type: [Required] The field data type. Possible values include STRING, + // BYTES, INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as + // FLOAT), BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, + // DATETIME, RECORD (where RECORD indicates that the field contains a + // nested schema) or STRUCT (same as RECORD). + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TableFieldSchema) MarshalJSON() ([]byte, error) { + type noMethod TableFieldSchema + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TableList struct { + // Etag: A hash of this page of results. + Etag string `json:"etag,omitempty"` + + // Kind: The type of list. + Kind string `json:"kind,omitempty"` + + // NextPageToken: A token to request the next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Tables: Tables in the requested dataset. + Tables []*TableListTables `json:"tables,omitempty"` + + // TotalItems: The total number of tables in the dataset. + TotalItems int64 `json:"totalItems,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TableList) MarshalJSON() ([]byte, error) { + type noMethod TableList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TableListTables struct { + // FriendlyName: The user-friendly name for this table. + FriendlyName string `json:"friendlyName,omitempty"` + + // Id: An opaque ID of the table + Id string `json:"id,omitempty"` + + // Kind: The resource type. + Kind string `json:"kind,omitempty"` + + // Labels: [Experimental] The labels associated with this table. You can + // use these to organize and group your tables. + Labels map[string]string `json:"labels,omitempty"` + + // TableReference: A reference uniquely identifying the table. + TableReference *TableReference `json:"tableReference,omitempty"` + + // Type: The type of table. Possible values are: TABLE, VIEW. + Type string `json:"type,omitempty"` + + // View: Additional details for a view. + View *TableListTablesView `json:"view,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FriendlyName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FriendlyName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TableListTables) MarshalJSON() ([]byte, error) { + type noMethod TableListTables + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TableListTablesView: Additional details for a view. +type TableListTablesView struct { + // UseLegacySql: True if view is defined in legacy SQL dialect, false if + // in standard SQL. + UseLegacySql bool `json:"useLegacySql,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UseLegacySql") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "UseLegacySql") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TableListTablesView) MarshalJSON() ([]byte, error) { + type noMethod TableListTablesView + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TableReference struct { + // DatasetId: [Required] The ID of the dataset containing this table. + DatasetId string `json:"datasetId,omitempty"` + + // ProjectId: [Required] The ID of the project containing this table. + ProjectId string `json:"projectId,omitempty"` + + // TableId: [Required] The ID of the table. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 1,024 characters. + TableId string `json:"tableId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DatasetId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DatasetId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TableReference) MarshalJSON() ([]byte, error) { + type noMethod TableReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TableRow struct { + // F: Represents a single row in the result set, consisting of one or + // more fields. + F []*TableCell `json:"f,omitempty"` + + // ForceSendFields is a list of field names (e.g. "F") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "F") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TableRow) MarshalJSON() ([]byte, error) { + type noMethod TableRow + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TableSchema struct { + // Fields: Describes the fields in a table. + Fields []*TableFieldSchema `json:"fields,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fields") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "Fields") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *TableSchema) MarshalJSON() ([]byte, error) {
+ type noMethod TableSchema
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type TimePartitioning struct {
+ // ExpirationMs: [Optional] Number of milliseconds for which to keep the
+ // storage for a partition.
+ ExpirationMs int64 `json:"expirationMs,omitempty,string"`
+
+ // Type: [Required] The only type supported is DAY, which will generate
+ // one partition per day based on data loading time.
+ Type string `json:"type,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "ExpirationMs") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "ExpirationMs") to include
+ // in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. However, any field with
+ // an empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *TimePartitioning) MarshalJSON() ([]byte, error) {
+ type noMethod TimePartitioning
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type UserDefinedFunctionResource struct {
+ // InlineCode: [Pick one] An inline resource that contains code for a
+ // user-defined function (UDF). Providing an inline code resource is
+ // equivalent to providing a URI for a file containing the same code.
+ InlineCode string `json:"inlineCode,omitempty"`
+
+ // ResourceUri: [Pick one] A code resource to load from a Google Cloud
+ // Storage URI (gs://bucket/path).
+ ResourceUri string `json:"resourceUri,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "InlineCode") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "InlineCode") to include in
+ // API requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserDefinedFunctionResource) MarshalJSON() ([]byte, error) { + type noMethod UserDefinedFunctionResource + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ViewDefinition struct { + // Query: [Required] A query that BigQuery executes when the view is + // referenced. + Query string `json:"query,omitempty"` + + // UseLegacySql: Specifies whether to use BigQuery's legacy SQL for this + // view. The default value is true. If set to false, the view will use + // BigQuery's standard SQL: + // https://cloud.google.com/bigquery/sql-reference/ Queries and views + // that reference this view must use the same flag value. + UseLegacySql bool `json:"useLegacySql,omitempty"` + + // UserDefinedFunctionResources: [Experimental] Describes user-defined + // function resources used in the query. + UserDefinedFunctionResources []*UserDefinedFunctionResource `json:"userDefinedFunctionResources,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Query") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Query") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ViewDefinition) MarshalJSON() ([]byte, error) { + type noMethod ViewDefinition + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "bigquery.datasets.delete": + +type DatasetsDeleteCall struct { + s *Service + projectId string + datasetId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the dataset specified by the datasetId value. Before +// you can delete a dataset, you must delete all its tables, either +// manually or by specifying deleteContents. Immediately after deletion, +// you can create another dataset with the same name. +func (r *DatasetsService) Delete(projectId string, datasetId string) *DatasetsDeleteCall { + c := &DatasetsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.datasetId = datasetId + return c +} + +// DeleteContents sets the optional parameter "deleteContents": If True, +// delete all the tables in the dataset. If False and the dataset +// contains tables, the request will fail. Default is False +func (c *DatasetsDeleteCall) DeleteContents(deleteContents bool) *DatasetsDeleteCall { + c.urlParams_.Set("deleteContents", fmt.Sprint(deleteContents)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
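+//
+// Illustration only, not generated code: a sketch of the delete call
+// this builder belongs to, assuming an authenticated *Service named
+// "svc" and placeholder project/dataset IDs.
+//
+//    err := svc.Datasets.Delete("my-project", "my_dataset").
+//        DeleteContents(true).
+//        Do()
+//    if err != nil {
+//        log.Fatal(err)
+//    }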
+func (c *DatasetsDeleteCall) Fields(s ...googleapi.Field) *DatasetsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DatasetsDeleteCall) Context(ctx context.Context) *DatasetsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DatasetsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DatasetsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "datasetId": c.datasetId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.datasets.delete" call. +func (c *DatasetsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.", + // "httpMethod": "DELETE", + // "id": "bigquery.datasets.delete", + // "parameterOrder": [ + // "projectId", + // "datasetId" + // ], + // "parameters": { + // "datasetId": { + // "description": "Dataset ID of dataset being deleted", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "deleteContents": { + // "description": "If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. Default is False", + // "location": "query", + // "type": "boolean" + // }, + // "projectId": { + // "description": "Project ID of the dataset being deleted", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets/{datasetId}", + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigquery.datasets.get": + +type DatasetsGetCall struct { + s *Service + projectId string + datasetId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the dataset specified by datasetID. +func (r *DatasetsService) Get(projectId string, datasetId string) *DatasetsGetCall { + c := &DatasetsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.datasetId = datasetId + return c +} + +// Fields allows partial responses to be retrieved. 
See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DatasetsGetCall) Fields(s ...googleapi.Field) *DatasetsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DatasetsGetCall) IfNoneMatch(entityTag string) *DatasetsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DatasetsGetCall) Context(ctx context.Context) *DatasetsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DatasetsGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *DatasetsGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "bigquery.datasets.get" call.
+// Exactly one of *Dataset or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Dataset.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *DatasetsGetCall) Do(opts ...googleapi.CallOption) (*Dataset, error) {
+ gensupport.SetOptions(c.urlParams_, opts...) 
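+ // A 304 (Not Modified) reply to an If-None-Match precondition is
+ // converted into a *googleapi.Error below so that callers can detect
+ // it with googleapi.IsNotModified instead of receiving a *Dataset.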
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Dataset{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the dataset specified by datasetID.", + // "httpMethod": "GET", + // "id": "bigquery.datasets.get", + // "parameterOrder": [ + // "projectId", + // "datasetId" + // ], + // "parameters": { + // "datasetId": { + // "description": "Dataset ID of the requested dataset", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the requested dataset", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets/{datasetId}", + // "response": { + // "$ref": "Dataset" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "bigquery.datasets.insert": + +type DatasetsInsertCall struct { + s *Service + projectId string + dataset *Dataset + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new empty dataset. +func (r *DatasetsService) Insert(projectId string, dataset *Dataset) *DatasetsInsertCall { + c := &DatasetsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.dataset = dataset + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DatasetsInsertCall) Fields(s ...googleapi.Field) *DatasetsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DatasetsInsertCall) Context(ctx context.Context) *DatasetsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DatasetsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DatasetsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets") + urls += "?" 
+ c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "bigquery.datasets.insert" call.
+// Exactly one of *Dataset or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Dataset.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *DatasetsInsertCall) Do(opts ...googleapi.CallOption) (*Dataset, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Dataset{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Creates a new empty dataset.",
+ //   "httpMethod": "POST",
+ //   "id": "bigquery.datasets.insert",
+ //   "parameterOrder": [
+ //     "projectId"
+ //   ],
+ //   "parameters": {
+ //     "projectId": {
+ //       "description": "Project ID of the new dataset",
+ //       "location": "path",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "projects/{projectId}/datasets",
+ //   "request": {
+ //     "$ref": "Dataset"
+ //   },
+ //   "response": {
+ //     "$ref": "Dataset"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/bigquery",
+ //     "https://www.googleapis.com/auth/cloud-platform"
+ //   ]
+ // }
+
+}
+
+// method id "bigquery.datasets.list":
+
+type DatasetsListCall struct {
+ s *Service
+ projectId string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// List: Lists all datasets in the specified project to which you have
+// been granted the READER dataset role.
+func (r *DatasetsService) List(projectId string) *DatasetsListCall {
+ c := &DatasetsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.projectId = projectId
+ return c
+}
+
+// All sets the optional parameter "all": Whether to list all datasets,
+// including hidden ones
+func (c *DatasetsListCall) All(all bool) *DatasetsListCall {
+ c.urlParams_.Set("all", fmt.Sprint(all))
+ return c
+}
+
+// Filter sets the optional parameter "filter": An expression for
+// filtering the results of the request by label. The syntax is
+// "labels.<name>[:<value>]". Multiple filters can be ANDed together by
+// connecting with a space. Example: "labels.department:receiving
+// labels.active". See Filtering datasets using labels for details. 
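+//
+// Illustration only, not generated code: a sketch assuming an
+// authenticated *Service named "svc".
+//
+//    list, err := svc.Datasets.List("my-project").
+//        Filter("labels.department:receiving labels.active").
+//        Do()
+//    if err == nil {
+//        for _, d := range list.Datasets {
+//            fmt.Println(d.Id)
+//        }
+//    }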
+func (c *DatasetsListCall) Filter(filter string) *DatasetsListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results to return
+func (c *DatasetsListCall) MaxResults(maxResults int64) *DatasetsListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Page token,
+// returned by a previous call, to request the next page of results
+func (c *DatasetsListCall) PageToken(pageToken string) *DatasetsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DatasetsListCall) Fields(s ...googleapi.Field) *DatasetsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DatasetsListCall) IfNoneMatch(entityTag string) *DatasetsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DatasetsListCall) Context(ctx context.Context) *DatasetsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DatasetsListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *DatasetsListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "bigquery.datasets.list" call.
+// Exactly one of *DatasetList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *DatasetList.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *DatasetsListCall) Do(opts ...googleapi.CallOption) (*DatasetList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...) 
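+ // Note: the Pages helper below drives Do in a loop, feeding each
+ // NextPageToken back in through PageToken until the token comes back
+ // empty.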
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DatasetList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all datasets in the specified project to which you have been granted the READER dataset role.", + // "httpMethod": "GET", + // "id": "bigquery.datasets.list", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "all": { + // "description": "Whether to list all datasets, including hidden ones", + // "location": "query", + // "type": "boolean" + // }, + // "filter": { + // "description": "An expression for filtering the results of the request by label. The syntax is \"labels.\u003cname\u003e[:\u003cvalue\u003e]\". Multiple filters can be ANDed together by connecting with a space. Example: \"labels.department:receiving labels.active\". See Filtering datasets using labels for details.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "description": "The maximum number of results to return", + // "format": "uint32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Page token, returned by a previous call, to request the next page of results", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the datasets to be listed", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets", + // "response": { + // "$ref": "DatasetList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DatasetsListCall) Pages(ctx context.Context, f func(*DatasetList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "bigquery.datasets.patch": + +type DatasetsPatchCall struct { + s *Service + projectId string + datasetId string + dataset *Dataset + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates information in an existing dataset. The update method +// replaces the entire dataset resource, whereas the patch method only +// replaces fields that are provided in the submitted dataset resource. +// This method supports patch semantics. 
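+//
+// Illustration only, not generated code: a sketch that changes a single
+// field and leaves the rest of the server-side resource untouched. It
+// assumes an authenticated *Service named "svc" and the Dataset type
+// generated earlier in this file.
+//
+//    patched, err := svc.Datasets.Patch("my-project", "my_dataset", &Dataset{
+//        FriendlyName: "Receiving team datasets",
+//    }).Do()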
+func (r *DatasetsService) Patch(projectId string, datasetId string, dataset *Dataset) *DatasetsPatchCall { + c := &DatasetsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.datasetId = datasetId + c.dataset = dataset + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DatasetsPatchCall) Fields(s ...googleapi.Field) *DatasetsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DatasetsPatchCall) Context(ctx context.Context) *DatasetsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DatasetsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DatasetsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "datasetId": c.datasetId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.datasets.patch" call. +// Exactly one of *Dataset or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Dataset.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DatasetsPatchCall) Do(opts ...googleapi.CallOption) (*Dataset, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Dataset{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "bigquery.datasets.patch", + // "parameterOrder": [ + // "projectId", + // "datasetId" + // ], + // "parameters": { + // "datasetId": { + // "description": "Dataset ID of the dataset being updated", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the dataset being updated", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets/{datasetId}", + // "request": { + // "$ref": "Dataset" + // }, + // "response": { + // "$ref": "Dataset" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigquery.datasets.update": + +type DatasetsUpdateCall struct { + s *Service + projectId string + datasetId string + dataset *Dataset + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates information in an existing dataset. The update method +// replaces the entire dataset resource, whereas the patch method only +// replaces fields that are provided in the submitted dataset resource. +func (r *DatasetsService) Update(projectId string, datasetId string, dataset *Dataset) *DatasetsUpdateCall { + c := &DatasetsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.datasetId = datasetId + c.dataset = dataset + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DatasetsUpdateCall) Fields(s ...googleapi.Field) *DatasetsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DatasetsUpdateCall) Context(ctx context.Context) *DatasetsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DatasetsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DatasetsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "datasetId": c.datasetId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.datasets.update" call. +// Exactly one of *Dataset or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Dataset.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DatasetsUpdateCall) Do(opts ...googleapi.CallOption) (*Dataset, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Dataset{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.", + // "httpMethod": "PUT", + // "id": "bigquery.datasets.update", + // "parameterOrder": [ + // "projectId", + // "datasetId" + // ], + // "parameters": { + // "datasetId": { + // "description": "Dataset ID of the dataset being updated", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the dataset being updated", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets/{datasetId}", + // "request": { + // "$ref": "Dataset" + // }, + // "response": { + // "$ref": "Dataset" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigquery.jobs.cancel": + +type JobsCancelCall struct { + s *Service + projectId string + jobId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Requests that a job be cancelled. This call will return +// immediately, and the client will need to poll for the job status to +// see if the cancel completed successfully. Cancelled jobs may still +// incur costs. +func (r *JobsService) Cancel(projectId string, jobId string) *JobsCancelCall { + c := &JobsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.jobId = jobId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *JobsCancelCall) Fields(s ...googleapi.Field) *JobsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *JobsCancelCall) Context(ctx context.Context) *JobsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
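+//
+// For example, a caller could tag the cancel request with an arbitrary
+// diagnostic header before executing it (illustrative sketch only; the
+// header name and values are made up, and svc is assumed to be a *Service
+// obtained from New):
+//
+//	call := svc.Jobs.Cancel("my-project", "job_abc123")
+//	call.Header().Set("X-Debug-Trace", "cancel-retry-1")
+//	resp, err := call.Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(resp.Job.Status.State)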
+func (c *JobsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *JobsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs/{jobId}/cancel") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "jobId": c.jobId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.jobs.cancel" call. +// Exactly one of *JobCancelResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *JobCancelResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *JobsCancelCall) Do(opts ...googleapi.CallOption) (*JobCancelResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &JobCancelResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully. Cancelled jobs may still incur costs.", + // "httpMethod": "POST", + // "id": "bigquery.jobs.cancel", + // "parameterOrder": [ + // "projectId", + // "jobId" + // ], + // "parameters": { + // "jobId": { + // "description": "[Required] Job ID of the job to cancel", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "[Required] Project ID of the job to cancel", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/jobs/{jobId}/cancel", + // "response": { + // "$ref": "JobCancelResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigquery.jobs.get": + +type JobsGetCall struct { + s *Service + projectId string + jobId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns information about a specific job. Job information is +// available for a six month period after creation. Requires that you're +// the person who ran the job, or have the Is Owner project role. 
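+//
+// A minimal usage sketch (assuming svc is a *Service obtained from New
+// and ctx is a context.Context; the project and job IDs are placeholders):
+//
+//	job, err := svc.Jobs.Get("my-project", "job_abc123").Context(ctx).Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(job.Status.State) // e.g. "PENDING", "RUNNING" or "DONE"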
+func (r *JobsService) Get(projectId string, jobId string) *JobsGetCall { + c := &JobsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.jobId = jobId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *JobsGetCall) Fields(s ...googleapi.Field) *JobsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *JobsGetCall) IfNoneMatch(entityTag string) *JobsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *JobsGetCall) Context(ctx context.Context) *JobsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *JobsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *JobsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs/{jobId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "jobId": c.jobId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.jobs.get" call. +// Exactly one of *Job or error will be non-nil. Any non-2xx status code +// is an error. Response headers are in either +// *Job.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *JobsGetCall) Do(opts ...googleapi.CallOption) (*Job, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Job{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns information about a specific job. Job information is available for a six month period after creation. 
Requires that you're the person who ran the job, or have the Is Owner project role.", + // "httpMethod": "GET", + // "id": "bigquery.jobs.get", + // "parameterOrder": [ + // "projectId", + // "jobId" + // ], + // "parameters": { + // "jobId": { + // "description": "[Required] Job ID of the requested job", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "[Required] Project ID of the requested job", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/jobs/{jobId}", + // "response": { + // "$ref": "Job" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "bigquery.jobs.getQueryResults": + +type JobsGetQueryResultsCall struct { + s *Service + projectId string + jobId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetQueryResults: Retrieves the results of a query job. +func (r *JobsService) GetQueryResults(projectId string, jobId string) *JobsGetQueryResultsCall { + c := &JobsGetQueryResultsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.jobId = jobId + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to read +func (c *JobsGetQueryResultsCall) MaxResults(maxResults int64) *JobsGetQueryResultsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Page token, +// returned by a previous call, to request the next page of results +func (c *JobsGetQueryResultsCall) PageToken(pageToken string) *JobsGetQueryResultsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// StartIndex sets the optional parameter "startIndex": Zero-based index +// of the starting row +func (c *JobsGetQueryResultsCall) StartIndex(startIndex uint64) *JobsGetQueryResultsCall { + c.urlParams_.Set("startIndex", fmt.Sprint(startIndex)) + return c +} + +// TimeoutMs sets the optional parameter "timeoutMs": How long to wait +// for the query to complete, in milliseconds, before returning. Default +// is 10 seconds. If the timeout passes before the job completes, the +// 'jobComplete' field in the response will be false +func (c *JobsGetQueryResultsCall) TimeoutMs(timeoutMs int64) *JobsGetQueryResultsCall { + c.urlParams_.Set("timeoutMs", fmt.Sprint(timeoutMs)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *JobsGetQueryResultsCall) Fields(s ...googleapi.Field) *JobsGetQueryResultsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *JobsGetQueryResultsCall) IfNoneMatch(entityTag string) *JobsGetQueryResultsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *JobsGetQueryResultsCall) Context(ctx context.Context) *JobsGetQueryResultsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *JobsGetQueryResultsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *JobsGetQueryResultsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/queries/{jobId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "jobId": c.jobId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.jobs.getQueryResults" call. +// Exactly one of *GetQueryResultsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GetQueryResultsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *JobsGetQueryResultsCall) Do(opts ...googleapi.CallOption) (*GetQueryResultsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
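+	// Note (explanatory, not generated): the block below special-cases
+	// http.StatusNotModified. When a precondition set via IfNoneMatch
+	// matches, the server answers 304 with no usable body, and Do turns
+	// that into a *googleapi.Error so callers can detect it with
+	// googleapi.IsNotModified(err).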
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GetQueryResultsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the results of a query job.", + // "httpMethod": "GET", + // "id": "bigquery.jobs.getQueryResults", + // "parameterOrder": [ + // "projectId", + // "jobId" + // ], + // "parameters": { + // "jobId": { + // "description": "[Required] Job ID of the query job", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of results to read", + // "format": "uint32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Page token, returned by a previous call, to request the next page of results", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "[Required] Project ID of the query job", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "startIndex": { + // "description": "Zero-based index of the starting row", + // "format": "uint64", + // "location": "query", + // "type": "string" + // }, + // "timeoutMs": { + // "description": "How long to wait for the query to complete, in milliseconds, before returning. Default is 10 seconds. If the timeout passes before the job completes, the 'jobComplete' field in the response will be false", + // "format": "uint32", + // "location": "query", + // "type": "integer" + // } + // }, + // "path": "projects/{projectId}/queries/{jobId}", + // "response": { + // "$ref": "GetQueryResultsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *JobsGetQueryResultsCall) Pages(ctx context.Context, f func(*GetQueryResultsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.PageToken == "" { + return nil + } + c.PageToken(x.PageToken) + } +} + +// method id "bigquery.jobs.insert": + +type JobsInsertCall struct { + s *Service + projectId string + job *Job + urlParams_ gensupport.URLParams + media_ io.Reader + mediaBuffer_ *gensupport.MediaBuffer + mediaType_ string + mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater_ googleapi.ProgressUpdater + ctx_ context.Context + header_ http.Header +} + +// Insert: Starts a new asynchronous job. Requires the Can View project +// role. 
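+//
+// A sketch of starting an asynchronous query job (field names follow this
+// package's Job schema; the project ID and SQL text are placeholders):
+//
+//	job := &Job{
+//		Configuration: &JobConfiguration{
+//			Query: &JobConfigurationQuery{
+//				Query: "SELECT 17 AS answer",
+//			},
+//		},
+//	}
+//	inserted, err := svc.Jobs.Insert("my-project", job).Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(inserted.JobReference.JobId)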
+func (r *JobsService) Insert(projectId string, job *Job) *JobsInsertCall { + c := &JobsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.job = job + return c +} + +// Media specifies the media to upload in one or more chunks. The chunk +// size may be controlled by supplying a MediaOption generated by +// googleapi.ChunkSize. The chunk size defaults to +// googleapi.DefaultUploadChunkSize.The Content-Type header used in the +// upload request will be determined by sniffing the contents of r, +// unless a MediaOption generated by googleapi.ContentType is +// supplied. +// At most one of Media and ResumableMedia may be set. +func (c *JobsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *JobsInsertCall { + opts := googleapi.ProcessMediaOptions(options) + chunkSize := opts.ChunkSize + if !opts.ForceEmptyContentType { + r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) + } + c.media_, c.mediaBuffer_ = gensupport.PrepareUpload(r, chunkSize) + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be +// canceled with ctx. +// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. +func (c *JobsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *JobsInsertCall { + c.ctx_ = ctx + rdr := gensupport.ReaderAtToReader(r, size) + rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) + c.mediaBuffer_ = gensupport.NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize) + c.media_ = nil + c.mediaSize_ = size + return c +} + +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). +func (c *JobsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *JobsInsertCall { + c.progressUpdater_ = pu + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *JobsInsertCall) Fields(s ...googleapi.Field) *JobsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. +func (c *JobsInsertCall) Context(ctx context.Context) *JobsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
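+//
+// Continuing the Insert sketch above, a load job could attach its data
+// with Media before the call is executed (illustrative; f is assumed to
+// be an already-opened *os.File and the table reference is a placeholder):
+//
+//	job := &Job{
+//		Configuration: &JobConfiguration{
+//			Load: &JobConfigurationLoad{
+//				DestinationTable: &TableReference{
+//					ProjectId: "my-project",
+//					DatasetId: "my_dataset",
+//					TableId:   "my_table",
+//				},
+//				SourceFormat: "CSV",
+//			},
+//		},
+//	}
+//	call := svc.Jobs.Insert("my-project", job).Media(f, googleapi.ContentType("text/csv"))
+//	inserted, err := call.Do()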
+func (c *JobsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *JobsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.job) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs") + if c.media_ != nil || c.mediaBuffer_ != nil { + urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + protocol := "multipart" + if c.mediaBuffer_ != nil { + protocol = "resumable" + } + c.urlParams_.Set("uploadType", protocol) + } + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") + } + if c.media_ != nil { + combined, ctype := gensupport.CombineBodyMedia(body, "application/json", c.media_, c.mediaType_) + defer combined.Close() + reqHeaders.Set("Content-Type", ctype) + body = combined + } + if c.mediaBuffer_ != nil && c.mediaType_ != "" { + reqHeaders.Set("X-Upload-Content-Type", c.mediaType_) + } + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.jobs.insert" call. +// Exactly one of *Job or error will be non-nil. Any non-2xx status code +// is an error. Response headers are in either +// *Job.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *JobsInsertCall) Do(opts ...googleapi.CallOption) (*Job, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + if c.mediaBuffer_ != nil { + loc := res.Header.Get("Location") + rx := &gensupport.ResumableUpload{ + Client: c.s.client, + UserAgent: c.s.userAgent(), + URI: loc, + Media: c.mediaBuffer_, + MediaType: c.mediaType_, + Callback: func(curr int64) { + if c.progressUpdater_ != nil { + c.progressUpdater_(curr, c.mediaSize_) + } + }, + } + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + } + ret := &Job{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Starts a new asynchronous job. 
Requires the Can View project role.", + // "httpMethod": "POST", + // "id": "bigquery.jobs.insert", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": "/resumable/upload/bigquery/v2/projects/{projectId}/jobs" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/bigquery/v2/projects/{projectId}/jobs" + // } + // } + // }, + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "Project ID of the project that will be billed for the job", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/jobs", + // "request": { + // "$ref": "Job" + // }, + // "response": { + // "$ref": "Job" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaUpload": true + // } + +} + +// method id "bigquery.jobs.list": + +type JobsListCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all jobs that you started in the specified project. Job +// information is available for a six month period after creation. The +// job list is sorted in reverse chronological order, by job creation +// time. Requires the Can View project role, or the Is Owner project +// role if you set the allUsers property. +func (r *JobsService) List(projectId string) *JobsListCall { + c := &JobsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// AllUsers sets the optional parameter "allUsers": Whether to display +// jobs owned by all users in the project. Default false +func (c *JobsListCall) AllUsers(allUsers bool) *JobsListCall { + c.urlParams_.Set("allUsers", fmt.Sprint(allUsers)) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return +func (c *JobsListCall) MaxResults(maxResults int64) *JobsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Page token, +// returned by a previous call, to request the next page of results +func (c *JobsListCall) PageToken(pageToken string) *JobsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Projection sets the optional parameter "projection": Restrict +// information returned to a set of selected fields +// +// Possible values: +// "full" - Includes all job data +// "minimal" - Does not include the job configuration +func (c *JobsListCall) Projection(projection string) *JobsListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// StateFilter sets the optional parameter "stateFilter": Filter for job +// state +// +// Possible values: +// "done" - Finished jobs +// "pending" - Pending jobs +// "running" - Running jobs +func (c *JobsListCall) StateFilter(stateFilter ...string) *JobsListCall { + c.urlParams_.SetMulti("stateFilter", append([]string{}, stateFilter...)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
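+//
+// For example, a caller interested only in job IDs and states could trim
+// the response payload like this (the field selectors are illustrative):
+//
+//	jl, err := svc.Jobs.List("my-project").
+//		Fields("nextPageToken", "jobs(id,status)").
+//		Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(len(jl.Jobs))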
+func (c *JobsListCall) Fields(s ...googleapi.Field) *JobsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *JobsListCall) IfNoneMatch(entityTag string) *JobsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *JobsListCall) Context(ctx context.Context) *JobsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *JobsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *JobsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.jobs.list" call. +// Exactly one of *JobList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *JobList.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *JobsListCall) Do(opts ...googleapi.CallOption) (*JobList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &JobList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all jobs that you started in the specified project. Job information is available for a six month period after creation. The job list is sorted in reverse chronological order, by job creation time. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.", + // "httpMethod": "GET", + // "id": "bigquery.jobs.list", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "allUsers": { + // "description": "Whether to display jobs owned by all users in the project. 
Default false", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "description": "Maximum number of results to return", + // "format": "uint32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Page token, returned by a previous call, to request the next page of results", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the jobs to list", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Restrict information returned to a set of selected fields", + // "enum": [ + // "full", + // "minimal" + // ], + // "enumDescriptions": [ + // "Includes all job data", + // "Does not include the job configuration" + // ], + // "location": "query", + // "type": "string" + // }, + // "stateFilter": { + // "description": "Filter for job state", + // "enum": [ + // "done", + // "pending", + // "running" + // ], + // "enumDescriptions": [ + // "Finished jobs", + // "Pending jobs", + // "Running jobs" + // ], + // "location": "query", + // "repeated": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/jobs", + // "response": { + // "$ref": "JobList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *JobsListCall) Pages(ctx context.Context, f func(*JobList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "bigquery.jobs.query": + +type JobsQueryCall struct { + s *Service + projectId string + queryrequest *QueryRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Query: Runs a BigQuery SQL query synchronously and returns query +// results if the query completes within a specified timeout. +func (r *JobsService) Query(projectId string, queryrequest *QueryRequest) *JobsQueryCall { + c := &JobsQueryCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.queryrequest = queryrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *JobsQueryCall) Fields(s ...googleapi.Field) *JobsQueryCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *JobsQueryCall) Context(ctx context.Context) *JobsQueryCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
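+//
+// A sketch of the synchronous query path (identifiers are placeholders;
+// TimeoutMs caps how long the server waits before answering with
+// jobComplete=false):
+//
+//	resp, err := svc.Jobs.Query("my-project", &QueryRequest{
+//		Query:     "SELECT 17 AS answer",
+//		TimeoutMs: 10000,
+//	}).Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if !resp.JobComplete {
+//		// fall back to polling via Jobs.GetQueryResults
+//	}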
+func (c *JobsQueryCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *JobsQueryCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.queryrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/queries") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.jobs.query" call. +// Exactly one of *QueryResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *QueryResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *JobsQueryCall) Do(opts ...googleapi.CallOption) (*QueryResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &QueryResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.", + // "httpMethod": "POST", + // "id": "bigquery.jobs.query", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "Project ID of the project billed for the query", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/queries", + // "request": { + // "$ref": "QueryRequest" + // }, + // "response": { + // "$ref": "QueryResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "bigquery.projects.list": + +type ProjectsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all projects to which you have been granted any project +// role. 
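+//
+// A paging sketch using the Pages helper defined below (ctx and svc are
+// assumed; the printed fields follow this package's ProjectList schema):
+//
+//	err := svc.Projects.List().Pages(ctx, func(pl *ProjectList) error {
+//		for _, p := range pl.Projects {
+//			fmt.Println(p.Id, p.FriendlyName)
+//		}
+//		return nil
+//	})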
+func (r *ProjectsService) List() *ProjectsListCall { + c := &ProjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return +func (c *ProjectsListCall) MaxResults(maxResults int64) *ProjectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Page token, +// returned by a previous call, to request the next page of results +func (c *ProjectsListCall) PageToken(pageToken string) *ProjectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsListCall) Fields(s ...googleapi.Field) *ProjectsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsListCall) IfNoneMatch(entityTag string) *ProjectsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsListCall) Context(ctx context.Context) *ProjectsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.projects.list" call. +// Exactly one of *ProjectList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ProjectList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsListCall) Do(opts ...googleapi.CallOption) (*ProjectList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ProjectList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all projects to which you have been granted any project role.", + // "httpMethod": "GET", + // "id": "bigquery.projects.list", + // "parameters": { + // "maxResults": { + // "description": "Maximum number of results to return", + // "format": "uint32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Page token, returned by a previous call, to request the next page of results", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects", + // "response": { + // "$ref": "ProjectList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsListCall) Pages(ctx context.Context, f func(*ProjectList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "bigquery.tabledata.insertAll": + +type TabledataInsertAllCall struct { + s *Service + projectId string + datasetId string + tableId string + tabledatainsertallrequest *TableDataInsertAllRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// InsertAll: Streams data into BigQuery one record at a time without +// needing to run a load job. Requires the WRITER dataset role. +func (r *TabledataService) InsertAll(projectId string, datasetId string, tableId string, tabledatainsertallrequest *TableDataInsertAllRequest) *TabledataInsertAllCall { + c := &TabledataInsertAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.datasetId = datasetId + c.tableId = tableId + c.tabledatainsertallrequest = tabledatainsertallrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TabledataInsertAllCall) Fields(s ...googleapi.Field) *TabledataInsertAllCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TabledataInsertAllCall) Context(ctx context.Context) *TabledataInsertAllCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
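+//
+// A streaming-insert sketch (the table coordinates and row contents are
+// placeholders; InsertId enables best-effort de-duplication on retries):
+//
+//	req := &TableDataInsertAllRequest{
+//		Rows: []*TableDataInsertAllRequestRows{
+//			{InsertId: "row-1", Json: map[string]JsonValue{"name": "a", "count": 1}},
+//		},
+//	}
+//	resp, err := svc.Tabledata.InsertAll("my-project", "my_dataset", "my_table", req).Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, ie := range resp.InsertErrors {
+//		fmt.Println(ie.Index, ie.Errors)
+//	}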
+func (c *TabledataInsertAllCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TabledataInsertAllCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tabledatainsertallrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "datasetId": c.datasetId, + "tableId": c.tableId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.tabledata.insertAll" call. +// Exactly one of *TableDataInsertAllResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TableDataInsertAllResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TabledataInsertAllCall) Do(opts ...googleapi.CallOption) (*TableDataInsertAllResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TableDataInsertAllResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Streams data into BigQuery one record at a time without needing to run a load job. 
Requires the WRITER dataset role.", + // "httpMethod": "POST", + // "id": "bigquery.tabledata.insertAll", + // "parameterOrder": [ + // "projectId", + // "datasetId", + // "tableId" + // ], + // "parameters": { + // "datasetId": { + // "description": "Dataset ID of the destination table.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the destination table.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "tableId": { + // "description": "Table ID of the destination table.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll", + // "request": { + // "$ref": "TableDataInsertAllRequest" + // }, + // "response": { + // "$ref": "TableDataInsertAllResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/bigquery.insertdata", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigquery.tabledata.list": + +type TabledataListCall struct { + s *Service + projectId string + datasetId string + tableId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves table data from a specified set of rows. Requires the +// READER dataset role. +func (r *TabledataService) List(projectId string, datasetId string, tableId string) *TabledataListCall { + c := &TabledataListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.datasetId = datasetId + c.tableId = tableId + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return +func (c *TabledataListCall) MaxResults(maxResults int64) *TabledataListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Page token, +// returned by a previous call, identifying the result set +func (c *TabledataListCall) PageToken(pageToken string) *TabledataListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// StartIndex sets the optional parameter "startIndex": Zero-based index +// of the starting row to read +func (c *TabledataListCall) StartIndex(startIndex uint64) *TabledataListCall { + c.urlParams_.Set("startIndex", fmt.Sprint(startIndex)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TabledataListCall) Fields(s ...googleapi.Field) *TabledataListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TabledataListCall) IfNoneMatch(entityTag string) *TabledataListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
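+//
+// Reading rows page by page could look like this (sketch; ctx and the
+// table coordinates are placeholders):
+//
+//	err := svc.Tabledata.List("my-project", "my_dataset", "my_table").
+//		MaxResults(100).
+//		Pages(ctx, func(tdl *TableDataList) error {
+//			fmt.Println(len(tdl.Rows), "rows in this page")
+//			return nil
+//		})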
+func (c *TabledataListCall) Context(ctx context.Context) *TabledataListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TabledataListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TabledataListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "datasetId": c.datasetId, + "tableId": c.tableId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.tabledata.list" call. +// Exactly one of *TableDataList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TableDataList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TabledataListCall) Do(opts ...googleapi.CallOption) (*TableDataList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TableDataList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves table data from a specified set of rows. 
Requires the READER dataset role.", + // "httpMethod": "GET", + // "id": "bigquery.tabledata.list", + // "parameterOrder": [ + // "projectId", + // "datasetId", + // "tableId" + // ], + // "parameters": { + // "datasetId": { + // "description": "Dataset ID of the table to read", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of results to return", + // "format": "uint32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Page token, returned by a previous call, identifying the result set", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the table to read", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "startIndex": { + // "description": "Zero-based index of the starting row to read", + // "format": "uint64", + // "location": "query", + // "type": "string" + // }, + // "tableId": { + // "description": "Table ID of the table to read", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data", + // "response": { + // "$ref": "TableDataList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TabledataListCall) Pages(ctx context.Context, f func(*TableDataList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.PageToken == "" { + return nil + } + c.PageToken(x.PageToken) + } +} + +// method id "bigquery.tables.delete": + +type TablesDeleteCall struct { + s *Service + projectId string + datasetId string + tableId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the table specified by tableId from the dataset. If +// the table contains data, all the data will be deleted. +func (r *TablesService) Delete(projectId string, datasetId string, tableId string) *TablesDeleteCall { + c := &TablesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.datasetId = datasetId + c.tableId = tableId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TablesDeleteCall) Fields(s ...googleapi.Field) *TablesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TablesDeleteCall) Context(ctx context.Context) *TablesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
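+//
+// A deletion sketch that distinguishes "already gone" from other failures
+// (identifiers are placeholders):
+//
+//	err := svc.Tables.Delete("my-project", "my_dataset", "my_table").Do()
+//	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+//		// table did not exist; treat as success
+//	} else if err != nil {
+//		log.Fatal(err)
+//	}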
+func (c *TablesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TablesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "datasetId": c.datasetId, + "tableId": c.tableId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.tables.delete" call. +func (c *TablesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.", + // "httpMethod": "DELETE", + // "id": "bigquery.tables.delete", + // "parameterOrder": [ + // "projectId", + // "datasetId", + // "tableId" + // ], + // "parameters": { + // "datasetId": { + // "description": "Dataset ID of the table to delete", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the table to delete", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "tableId": { + // "description": "Table ID of the table to delete", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigquery.tables.get": + +type TablesGetCall struct { + s *Service + projectId string + datasetId string + tableId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the specified table resource by table ID. This method does +// not return the data in the table, it only returns the table resource, +// which describes the structure of this table. +func (r *TablesService) Get(projectId string, datasetId string, tableId string) *TablesGetCall { + c := &TablesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.datasetId = datasetId + c.tableId = tableId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TablesGetCall) Fields(s ...googleapi.Field) *TablesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TablesGetCall) IfNoneMatch(entityTag string) *TablesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TablesGetCall) Context(ctx context.Context) *TablesGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *TablesGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *TablesGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	reqHeaders.Set("x-goog-api-client", c.s.clientHeader())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"projectId": c.projectId,
+		"datasetId": c.datasetId,
+		"tableId":   c.tableId,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "bigquery.tables.get" call.
+// Exactly one of *Table or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Table.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *TablesGetCall) Do(opts ...googleapi.CallOption) (*Table, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Table{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Gets the specified table resource by table ID.
This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.", + // "httpMethod": "GET", + // "id": "bigquery.tables.get", + // "parameterOrder": [ + // "projectId", + // "datasetId", + // "tableId" + // ], + // "parameters": { + // "datasetId": { + // "description": "Dataset ID of the requested table", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the requested table", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "tableId": { + // "description": "Table ID of the requested table", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", + // "response": { + // "$ref": "Table" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "bigquery.tables.insert": + +type TablesInsertCall struct { + s *Service + projectId string + datasetId string + table *Table + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new, empty table in the dataset. +func (r *TablesService) Insert(projectId string, datasetId string, table *Table) *TablesInsertCall { + c := &TablesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.datasetId = datasetId + c.table = table + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TablesInsertCall) Fields(s ...googleapi.Field) *TablesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TablesInsertCall) Context(ctx context.Context) *TablesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TablesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TablesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "datasetId": c.datasetId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.tables.insert" call. +// Exactly one of *Table or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either
+// *Table.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *TablesInsertCall) Do(opts ...googleapi.CallOption) (*Table, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Table{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Creates a new, empty table in the dataset.",
+	//   "httpMethod": "POST",
+	//   "id": "bigquery.tables.insert",
+	//   "parameterOrder": [
+	//     "projectId",
+	//     "datasetId"
+	//   ],
+	//   "parameters": {
+	//     "datasetId": {
+	//       "description": "Dataset ID of the new table",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "projectId": {
+	//       "description": "Project ID of the new table",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "projects/{projectId}/datasets/{datasetId}/tables",
+	//   "request": {
+	//     "$ref": "Table"
+	//   },
+	//   "response": {
+	//     "$ref": "Table"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/bigquery",
+	//     "https://www.googleapis.com/auth/cloud-platform"
+	//   ]
+	// }
+
+}
+
+// method id "bigquery.tables.list":
+
+type TablesListCall struct {
+	s            *Service
+	projectId    string
+	datasetId    string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// List: Lists all tables in the specified dataset. Requires the READER
+// dataset role.
+func (r *TablesService) List(projectId string, datasetId string) *TablesListCall {
+	c := &TablesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.projectId = projectId
+	c.datasetId = datasetId
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of results to return
+func (c *TablesListCall) MaxResults(maxResults int64) *TablesListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Page token,
+// returned by a previous call, to request the next page of results
+func (c *TablesListCall) PageToken(pageToken string) *TablesListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TablesListCall) Fields(s ...googleapi.Field) *TablesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
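+//
+// A hedged sketch of etag-based polling (svc and savedEtag are
+// placeholders; savedEtag would be captured from a previous
+// TableList.Etag value):
+//
+//	list, err := svc.Tables.List("my-project", "my_dataset").
+//		IfNoneMatch(savedEtag).Do()
+//	if googleapi.IsNotModified(err) {
+//		// Nothing changed since savedEtag; reuse the cached listing.
+//	}
+//	_ = list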
+func (c *TablesListCall) IfNoneMatch(entityTag string) *TablesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TablesListCall) Context(ctx context.Context) *TablesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TablesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TablesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "datasetId": c.datasetId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.tables.list" call. +// Exactly one of *TableList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TableList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TablesListCall) Do(opts ...googleapi.CallOption) (*TableList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TableList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all tables in the specified dataset. 
Requires the READER dataset role.", + // "httpMethod": "GET", + // "id": "bigquery.tables.list", + // "parameterOrder": [ + // "projectId", + // "datasetId" + // ], + // "parameters": { + // "datasetId": { + // "description": "Dataset ID of the tables to list", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of results to return", + // "format": "uint32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Page token, returned by a previous call, to request the next page of results", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the tables to list", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets/{datasetId}/tables", + // "response": { + // "$ref": "TableList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TablesListCall) Pages(ctx context.Context, f func(*TableList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "bigquery.tables.patch": + +type TablesPatchCall struct { + s *Service + projectId string + datasetId string + tableId string + table *Table + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates information in an existing table. The update method +// replaces the entire table resource, whereas the patch method only +// replaces fields that are provided in the submitted table resource. +// This method supports patch semantics. +func (r *TablesService) Patch(projectId string, datasetId string, tableId string, table *Table) *TablesPatchCall { + c := &TablesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.datasetId = datasetId + c.tableId = tableId + c.table = table + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TablesPatchCall) Fields(s ...googleapi.Field) *TablesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TablesPatchCall) Context(ctx context.Context) *TablesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TablesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TablesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "datasetId": c.datasetId, + "tableId": c.tableId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.tables.patch" call. +// Exactly one of *Table or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Table.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *TablesPatchCall) Do(opts ...googleapi.CallOption) (*Table, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Table{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "bigquery.tables.patch", + // "parameterOrder": [ + // "projectId", + // "datasetId", + // "tableId" + // ], + // "parameters": { + // "datasetId": { + // "description": "Dataset ID of the table to update", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the table to update", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "tableId": { + // "description": "Table ID of the table to update", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", + // "request": { + // "$ref": "Table" + // }, + // "response": { + // "$ref": "Table" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigquery.tables.update": + +type TablesUpdateCall struct { + s *Service + projectId string + datasetId string + tableId string + table *Table + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates information in an existing table. The update method +// replaces the entire table resource, whereas the patch method only +// replaces fields that are provided in the submitted table resource. +func (r *TablesService) Update(projectId string, datasetId string, tableId string, table *Table) *TablesUpdateCall { + c := &TablesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.datasetId = datasetId + c.tableId = tableId + c.table = table + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TablesUpdateCall) Fields(s ...googleapi.Field) *TablesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TablesUpdateCall) Context(ctx context.Context) *TablesUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TablesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TablesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders.Set("x-goog-api-client", c.s.clientHeader()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.table) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "datasetId": c.datasetId, + "tableId": c.tableId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigquery.tables.update" call. 
+// Exactly one of *Table or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Table.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *TablesUpdateCall) Do(opts ...googleapi.CallOption) (*Table, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Table{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.", + // "httpMethod": "PUT", + // "id": "bigquery.tables.update", + // "parameterOrder": [ + // "projectId", + // "datasetId", + // "tableId" + // ], + // "parameters": { + // "datasetId": { + // "description": "Dataset ID of the table to update", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID of the table to update", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "tableId": { + // "description": "Table ID of the table to update", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}", + // "request": { + // "$ref": "Table" + // }, + // "response": { + // "$ref": "Table" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigquery", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} diff --git a/vendor/vendor.json b/vendor/vendor.json index bf1591a7b..97ca80fc6 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -3266,6 +3266,12 @@ "revision": "4687d739464a2d0af89a25be0318456e0776f3ef", "revisionTime": "2017-02-23T06:09:55Z" }, + { + "checksumSHA1": "FEzQdhqmb6aqGL1lKnjOcUHIGSY=", + "path": "google.golang.org/api/bigquery/v2", + "revision": "16ab375f94503bfa0d19db78e96bffbe1a34354f", + "revisionTime": "2017-03-20T22:51:23Z" + }, { "checksumSHA1": "I9nlJJGeNBvWlH7FLtRscT6NJhw=", "path": "google.golang.org/api/cloudbilling/v1", diff --git a/website/source/docs/providers/google/r/bigquery_dataset.html.markdown b/website/source/docs/providers/google/r/bigquery_dataset.html.markdown new file mode 100644 index 000000000..5edba255d --- /dev/null +++ b/website/source/docs/providers/google/r/bigquery_dataset.html.markdown @@ -0,0 +1,80 @@ +--- +layout: "google" +page_title: "Google: google_bigquery_dataset" +sidebar_current: "docs-google-bigquery-dataset" +description: |- + Creates a dataset resource for Google BigQuery. +--- + +# google_bigquery_dataset + +Creates a dataset resource for Google BigQuery. 
For more information see
+[the official documentation](https://cloud.google.com/bigquery/docs/) and the
+[API reference](https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets).
+
+## Example Usage
+
+```hcl
+resource "google_bigquery_dataset" "default" {
+  dataset_id                  = "test"
+  friendly_name               = "test"
+  description                 = "This is a test description"
+  location                    = "EU"
+  default_table_expiration_ms = 3600000
+
+  labels {
+    env = "default"
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `dataset_id` - (Required) A unique ID for the resource.
+    Changing this forces a new resource to be created.
+
+* `project` - (Optional) The project in which the resource belongs. If it
+    is not provided, the provider project is used.
+
+* `friendly_name` - (Optional) A descriptive name for the dataset.
+
+* `description` - (Optional) A user-friendly description of the dataset.
+
+* `location` - (Optional) The geographic location where the dataset should reside.
+
+    Possible values include `EU` and `US`. The default value is `US`.
+
+    Changing this forces a new resource to be created.
+
+* `default_table_expiration_ms` - (Optional) The default lifetime of all
+    tables in the dataset, in milliseconds. The minimum value is 3600000
+    milliseconds (one hour).
+
+    Once this property is set, all newly created tables in the dataset
+    will have an `expirationTime` property set to the creation time plus
+    the value in this property, and changing the value will only affect
+    new tables, not existing ones. When the `expirationTime` for a given
+    table is reached, that table will be deleted automatically. If a
+    table's `expirationTime` is modified or removed before the table
+    expires, or if you provide an explicit `expirationTime` when creating
+    a table, that value takes precedence over the default expiration
+    time indicated by this property.
+
+* `labels` - (Optional) A mapping of labels to assign to the resource.
+
+## Attributes Reference
+
+In addition to the arguments listed above, the following computed attributes are
+exported:
+
+* `self_link` - The URI of the created resource.
+
+* `etag` - A hash of the resource.
+
+* `creation_time` - The time when this dataset was created, in milliseconds since the epoch.
+
+* `last_modified_time` - The date when this dataset or any of its tables was last modified,
+  in milliseconds since the epoch.
diff --git a/website/source/layouts/google.erb b/website/source/layouts/google.erb
index b615b13bc..cbefd24f8 100644
--- a/website/source/layouts/google.erb
+++ b/website/source/layouts/google.erb
@@ -10,6 +10,15 @@
         <a href="/docs/providers/google/index.html">Google Provider</a>
       </li>
 
+      <li<%= sidebar_current("docs-google-bigquery") %>>
+        <a href="#">Google BigQuery Resources</a>
+        <ul class="nav nav-visible">
+          <li<%= sidebar_current("docs-google-bigquery-dataset") %>>
+            <a href="/docs/providers/google/r/bigquery_dataset.html">google_bigquery_dataset</a>
+          </li>
+        </ul>
+      </li>
+
       <li<%= sidebar_current("docs-google-datasource") %>>
         <a href="#">Google Cloud Platform Data Sources</a>
+ >
+ Cognito Resources
+
+
+ >
 Config Resources
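For reviewers who want to exercise the newly vendored `google.golang.org/api/bigquery/v2` client directly, the following is a minimal, illustrative sketch and not part of the patch. It assumes Application Default Credentials are available in the environment, and the project and dataset IDs are placeholders; the `Pages` helper it uses is the generated pagination wrapper shown above.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	bigquery "google.golang.org/api/bigquery/v2"
)

func main() {
	ctx := context.Background()

	// Assumption: Application Default Credentials are configured
	// (e.g. via `gcloud auth application-default login`).
	client, err := google.DefaultClient(ctx, bigquery.BigqueryScope)
	if err != nil {
		log.Fatal(err)
	}

	svc, err := bigquery.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder identifiers — substitute real project/dataset IDs.
	projectID, datasetID := "my-project", "my_dataset"

	// Pages handles the pageToken bookkeeping internally, invoking the
	// callback once per page of results until NextPageToken is empty.
	err = svc.Tables.List(projectID, datasetID).MaxResults(50).
		Pages(ctx, func(page *bigquery.TableList) error {
			for _, t := range page.Tables {
				fmt.Println(t.Id)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}
```

The same pattern presumably applies inside the provider: build one authenticated `*http.Client`, construct a single `*bigquery.Service` from it, and share that service across all generated calls.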