Merge pull request #20012 from hashicorp/jbardin/nested-set-shims

Fix shims with deeper nested containers
James Bardin 2019-01-15 15:40:16 -05:00 committed by GitHub
commit 30eead2df5
3 changed files with 114 additions and 19 deletions


@@ -56,6 +56,34 @@ func testResourceNested() *schema.Resource {
},
},
},
"list_block": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sub_list_block": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bool": {
Type: schema.TypeBool,
Optional: true,
},
"set": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
},
},
},
},
},
}
}
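For orientation, the new nested block above would be consumed through the usual helper/schema accessors. A minimal sketch, assuming the surrounding file's "github.com/hashicorp/terraform/helper/schema" import and a hypothetical readListBlock helper that is not part of this change:

// Hypothetical helper (not part of this PR): reads the nested list_block /
// sub_list_block structure defined above from the legacy ResourceData.
func readListBlock(d *schema.ResourceData) (bool, []interface{}) {
	blocks := d.Get("list_block").([]interface{})
	if len(blocks) == 0 {
		return false, nil
	}
	subs := blocks[0].(map[string]interface{})["sub_list_block"].([]interface{})
	if len(subs) == 0 {
		return false, nil
	}
	sub := subs[0].(map[string]interface{})
	// A TypeSet attribute comes back as a *schema.Set; List() returns its elements.
	return sub["bool"].(bool), sub["set"].(*schema.Set).List()
}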
@@ -65,7 +93,18 @@ func testResourceNestedCreate(d *schema.ResourceData, meta interface{}) error {
return testResourceNestedRead(d, meta)
}
func testResourceNestedUpdate(d *schema.ResourceData, meta interface{}) error {
return testResourceNestedRead(d, meta)
}
func testResourceNestedRead(d *schema.ResourceData, meta interface{}) error {
set := []map[string]interface{}{map[string]interface{}{
"sub_list_block": []map[string]interface{}{map[string]interface{}{
"bool": false,
"set": schema.NewSet(schema.HashString, nil),
}},
}}
d.Set("list_block", set)
return nil
}
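Shimmed to the legacy flatmap form, a value like the one set above is expected to flatten into count (.#) and index keys; for reference, the map below simply restates the expectations added to the state test later in this commit:

// Illustration only: the flatmap keys the nested value set above is expected
// to produce, mirroring the test expectations added in this PR.
var expectedFlatmap = map[string]string{
	"list_block.#":                        "1",
	"list_block.0.sub_list_block.#":       "1",
	"list_block.0.sub_list_block.0.bool":  "false",
	"list_block.0.sub_list_block.0.set.#": "0",
}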


@@ -31,6 +31,9 @@ resource "test_resource_nested" "foo" {
resource.TestCheckResourceAttr(
"test_resource_nested.foo", "nested.1877647874.string", "val",
),
resource.TestCheckResourceAttr(
"test_resource_nested.foo", "list_block.0.sub_list_block.0.bool", "false",
),
),
},
},
@@ -195,6 +198,10 @@ resource "test_resource_nested" "foo" {
"nested.140280279.string": "",
"nested.140280279.optional": "false",
"nested.140280279.nested_again.#": "0",
"list_block.#": "1",
"list_block.0.sub_list_block.#": "1",
"list_block.0.sub_list_block.0.bool": "false",
"list_block.0.sub_list_block.0.set.#": "0",
}
delete(got, "id") // it's random, so not useful for testing


@@ -4,6 +4,7 @@ import (
"encoding/json"
"errors"
"fmt"
"log"
"regexp"
"sort"
"strconv"
@@ -728,27 +729,78 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A
return resp, nil
}
if newInstanceState != nil {
// here we use the planned state to check for unknown/zero container values
// when normalizing the flatmap.
plannedState := hcl2shim.FlatmapValueFromHCL2(plannedStateVal)
newInstanceState.Attributes = normalizeFlatmapContainers(plannedState, newInstanceState.Attributes, true)
}
newStateVal := cty.NullVal(block.ImpliedType())
// We keep the null val if we destroyed the resource, otherwise build the
// entire object, even if the new state was nil.
if !destroy {
newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, block.ImpliedType())
// always return a null value for destroy
if newInstanceState == nil || destroy {
newStateMP, err := msgpack.Marshal(newStateVal, block.ImpliedType())
if err != nil {
resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
return resp, nil
}
resp.NewState = &proto.DynamicValue{
Msgpack: newStateMP,
}
return resp, nil
}
// here we use the planned state to check for unknown/zero container values
// when normalizing the flatmap.
plannedState := hcl2shim.FlatmapValueFromHCL2(plannedStateVal)
newInstanceState.Attributes = normalizeFlatmapContainers(plannedState, newInstanceState.Attributes, true)
// We keep the null val if we destroyed the resource, otherwise build the
// entire object, even if the new state was nil.
newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, block.ImpliedType())
if err != nil {
resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
return resp, nil
}
newStateVal = copyMissingValues(newStateVal, plannedStateVal)
// Cycle through the shims, to ensure that the plan will create an identical
// value. Errors in this block are non-fatal (and should not happen, since
// we've already shimmed this type), because we already have an applied value
// and want to return that even if a later Plan may not agree.
prevVal := newStateVal
for i := 0; ; i++ {
shimmedState, err := res.ShimInstanceStateFromValue(prevVal)
if err != nil {
log.Printf("[ERROR] failed to shim cty.Value: %s", err)
break
}
shimmedState.Attributes = normalizeFlatmapContainers(shimmedState.Attributes, shimmedState.Attributes, false)
tmpVal, err := hcl2shim.HCL2ValueFromFlatmap(shimmedState.Attributes, block.ImpliedType())
if err != nil {
log.Printf("[ERROR] failed to shim flatmap: %s", err)
break
}
tmpVal = copyMissingValues(tmpVal, prevVal)
// If we have the same value before and after the shimming process, we
// can be reasonably certain that PlanResourceChange will return the
// same value.
if tmpVal.RawEquals(prevVal) {
newStateVal = tmpVal
break
}
if i > 2 {
// This isn't fatal, since the value was actually applied.
log.Printf("[ERROR] hcl2shims failed to converge for value: %#v\n", newStateVal)
break
}
// The values are not the same, but we're only going to try this up to 3
// times before giving up. This should account for any empty nested values
// showing up a few levels deep.
prevVal = tmpVal
}
newStateVal = copyTimeoutValues(newStateVal, plannedStateVal)
newStateMP, err := msgpack.Marshal(newStateVal, block.ImpliedType())
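The loop in the hunk above is a small fixed-point iteration: keep round-tripping the applied value through the flatmap shims until it stops changing, and give up without failing after a few extra passes. A minimal, self-contained sketch of that pattern, using a toy step function as a stand-in for the real shim round-trip (ShimInstanceStateFromValue, normalizeFlatmapContainers, HCL2ValueFromFlatmap):

package main

import (
	"fmt"
	"strings"
)

// converge re-applies step until the value stops changing, or returns the
// last value anyway after a few extra passes, since failing to converge is
// treated as non-fatal in the loop above.
func converge(v string, step func(string) string, extraTries int) string {
	prev := v
	for i := 0; ; i++ {
		next := step(prev)
		if next == prev {
			return next // fixed point reached
		}
		if i > extraTries {
			fmt.Printf("[ERROR] failed to converge for value: %q\n", next)
			return next
		}
		prev = next
	}
}

func main() {
	// Toy stand-in for the shim round-trip: strip one trailing empty-container
	// marker per pass, the way empty nested values can drop out one level at a
	// time during normalization.
	step := func(s string) string {
		return strings.TrimSuffix(s, "/[]")
	}
	fmt.Println(converge("block/[]/[]", step, 3)) // prints "block"
}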
@@ -760,15 +812,12 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A
Msgpack: newStateMP,
}
if newInstanceState != nil {
meta, err := json.Marshal(newInstanceState.Meta)
if err != nil {
resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
return resp, nil
}
resp.Private = meta
meta, err := json.Marshal(newInstanceState.Meta)
if err != nil {
resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
return resp, nil
}
resp.Private = meta
return resp, nil
}