merge dependencies when refreshing

The resource configuration was always used to determine dependencies
during refresh, because when a resource had no changes there was
otherwise no opportunity to replace incorrect stored dependencies. Now
that we refresh during plan, we can detect that a resource has no
changes and store the new dependencies immediately.

Here we clean up the writeResourceInstanceState calls so that they no
longer modify the resource instance object, removing the `dependencies`
argument. Callers are now expected to set the Dependencies field on the
object themselves as needed.
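
To illustrate the new calling convention, here is a minimal standalone sketch; the types and function below are simplified stand-ins for Terraform's internal states and addrs APIs, not the real implementation:

package main

import "fmt"

// ConfigResource is a simplified stand-in for addrs.ConfigResource.
type ConfigResource string

// ResourceInstanceObject is a simplified stand-in for
// states.ResourceInstanceObject.
type ResourceInstanceObject struct {
	Dependencies []ConfigResource
}

// writeResourceInstanceState now persists the object exactly as given;
// there is no dependencies argument that might conditionally overwrite
// the stored value.
func writeResourceInstanceState(obj *ResourceInstanceObject) error {
	fmt.Printf("storing object with dependencies %v\n", obj.Dependencies)
	return nil
}

func main() {
	obj := &ResourceInstanceObject{}

	// The caller decides what the stored dependencies should be before
	// writing, e.g. the dependencies derived from the configuration.
	obj.Dependencies = []ConfigResource{"aws_instance.foo"}
	if err := writeResourceInstanceState(obj); err != nil {
		fmt.Println("error:", err)
	}
}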
James Bardin 2021-03-26 09:40:33 -04:00 committed by James Bardin
parent 985124bfa8
commit 7964328f34
9 changed files with 212 additions and 142 deletions

View File

@@ -248,3 +248,124 @@ resource "test_instance" "a" {
		t.Fatalf("expected apply order [b, a], got: %v\n", order)
	}
}
+
+// verify that dependencies are updated in the state during refresh and apply
+func TestApply_updateDependencies(t *testing.T) {
+	state := states.NewState()
+	root := state.EnsureModule(addrs.RootModuleInstance)
+
+	fooAddr := mustResourceInstanceAddr("aws_instance.foo")
+	barAddr := mustResourceInstanceAddr("aws_instance.bar")
+	bazAddr := mustResourceInstanceAddr("aws_instance.baz")
+	bamAddr := mustResourceInstanceAddr("aws_instance.bam")
+	binAddr := mustResourceInstanceAddr("aws_instance.bin")
+
+	root.SetResourceInstanceCurrent(
+		fooAddr.Resource,
+		&states.ResourceInstanceObjectSrc{
+			Status:    states.ObjectReady,
+			AttrsJSON: []byte(`{"id":"foo"}`),
+			Dependencies: []addrs.ConfigResource{
+				bazAddr.ContainingResource().Config(),
+			},
+		},
+		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
+	)
+
+	root.SetResourceInstanceCurrent(
+		binAddr.Resource,
+		&states.ResourceInstanceObjectSrc{
+			Status:    states.ObjectReady,
+			AttrsJSON: []byte(`{"id":"bin","type":"aws_instance","unknown":"ok"}`),
+			Dependencies: []addrs.ConfigResource{
+				bazAddr.ContainingResource().Config(),
+			},
+		},
+		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
+	)
+
+	root.SetResourceInstanceCurrent(
+		bazAddr.Resource,
+		&states.ResourceInstanceObjectSrc{
+			Status:    states.ObjectReady,
+			AttrsJSON: []byte(`{"id":"baz"}`),
+			Dependencies: []addrs.ConfigResource{
+				// Existing dependencies should not be removed from orphaned instances
+				bamAddr.ContainingResource().Config(),
+			},
+		},
+		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
+	)
+
+	root.SetResourceInstanceCurrent(
+		barAddr.Resource,
+		&states.ResourceInstanceObjectSrc{
+			Status:    states.ObjectReady,
+			AttrsJSON: []byte(`{"id":"bar","foo":"foo"}`),
+		},
+		mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
+	)
+
+	m := testModuleInline(t, map[string]string{
+		"main.tf": `
+resource "aws_instance" "bar" {
+  foo = aws_instance.foo.id
+}
+
+resource "aws_instance" "foo" {
+}
+
+resource "aws_instance" "bin" {
+}
+`,
+	})
+
+	p := testProvider("aws")
+
+	ctx := testContext2(t, &ContextOpts{
+		Config: m,
+		Providers: map[addrs.Provider]providers.Factory{
+			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
+		},
+		State: state,
+	})
+
+	plan, diags := ctx.Plan()
+	if diags.HasErrors() {
+		t.Fatal(diags.Err())
+	}
+
+	bar := plan.State.ResourceInstance(barAddr)
+	if len(bar.Current.Dependencies) == 0 || !bar.Current.Dependencies[0].Equal(fooAddr.ContainingResource().Config()) {
+		t.Fatalf("bar should depend on foo after refresh, but got %s", bar.Current.Dependencies)
+	}
+
+	foo := plan.State.ResourceInstance(fooAddr)
+	if len(foo.Current.Dependencies) == 0 || !foo.Current.Dependencies[0].Equal(bazAddr.ContainingResource().Config()) {
+		t.Fatalf("foo should depend on baz after refresh because of the update, but got %s", foo.Current.Dependencies)
+	}
+
+	bin := plan.State.ResourceInstance(binAddr)
+	if len(bin.Current.Dependencies) != 0 {
+		t.Fatalf("bin should depend on nothing after refresh because there is no change, but got %s", bin.Current.Dependencies)
+	}
+
+	baz := plan.State.ResourceInstance(bazAddr)
+	if len(baz.Current.Dependencies) == 0 || !baz.Current.Dependencies[0].Equal(bamAddr.ContainingResource().Config()) {
+		t.Fatalf("baz should depend on bam after refresh, but got %s", baz.Current.Dependencies)
+	}
+
+	state, diags = ctx.Apply()
+	if diags.HasErrors() {
+		t.Fatal(diags.Err())
+	}
+
+	bar = state.ResourceInstance(barAddr)
+	if len(bar.Current.Dependencies) == 0 || !bar.Current.Dependencies[0].Equal(fooAddr.ContainingResource().Config()) {
+		t.Fatalf("bar should still depend on foo after apply, but got %s", bar.Current.Dependencies)
+	}
+
+	foo = state.ResourceInstance(fooAddr)
+	if len(foo.Current.Dependencies) != 0 {
+		t.Fatalf("foo should have no deps after apply, but got %s", foo.Current.Dependencies)
+	}
+}

View File

@@ -1533,126 +1533,6 @@ func TestContext2Refresh_dataResourceDependsOn(t *testing.T) {
	}
}

-// verify that dependencies are updated in the state during refresh
-func TestRefresh_updateDependencies(t *testing.T) {
-	state := states.NewState()
-	root := state.EnsureModule(addrs.RootModuleInstance)
-
-	root.SetResourceInstanceCurrent(
-		addrs.Resource{
-			Mode: addrs.ManagedResourceMode,
-			Type: "aws_instance",
-			Name: "foo",
-		}.Instance(addrs.NoKey),
-		&states.ResourceInstanceObjectSrc{
-			Status:    states.ObjectReady,
-			AttrsJSON: []byte(`{"id":"foo"}`),
-			Dependencies: []addrs.ConfigResource{
-				// Existing dependencies should be removed when overridden by the config
-				{
-					Module: addrs.RootModule,
-					Resource: addrs.Resource{
-						Mode: addrs.ManagedResourceMode,
-						Type: "aws_instance",
-						Name: "baz",
-					},
-				},
-			},
-		},
-		addrs.AbsProviderConfig{
-			Provider: addrs.NewDefaultProvider("aws"),
-			Module:   addrs.RootModule,
-		},
-	)
-
-	root.SetResourceInstanceCurrent(
-		addrs.Resource{
-			Mode: addrs.ManagedResourceMode,
-			Type: "aws_instance",
-			Name: "baz",
-		}.Instance(addrs.NoKey),
-		&states.ResourceInstanceObjectSrc{
-			Status:    states.ObjectReady,
-			AttrsJSON: []byte(`{"id":"baz"}`),
-			Dependencies: []addrs.ConfigResource{
-				// Existing dependencies should not be removed from orphaned instances
-				{
-					Module: addrs.RootModule,
-					Resource: addrs.Resource{
-						Mode: addrs.ManagedResourceMode,
-						Type: "aws_instance",
-						Name: "bam",
-					},
-				},
-			},
-		},
-		addrs.AbsProviderConfig{
-			Provider: addrs.NewDefaultProvider("aws"),
-			Module:   addrs.RootModule,
-		},
-	)
-
-	root.SetResourceInstanceCurrent(
-		addrs.Resource{
-			Mode: addrs.ManagedResourceMode,
-			Type: "aws_instance",
-			Name: "bar",
-		}.Instance(addrs.NoKey),
-		&states.ResourceInstanceObjectSrc{
-			Status:    states.ObjectReady,
-			AttrsJSON: []byte(`{"id":"bar","foo":"foo"}`),
-		},
-		addrs.AbsProviderConfig{
-			Provider: addrs.NewDefaultProvider("aws"),
-			Module:   addrs.RootModule,
-		},
-	)
-
-	m := testModuleInline(t, map[string]string{
-		"main.tf": `
-resource "aws_instance" "bar" {
-  foo = aws_instance.foo.id
-}
-
-resource "aws_instance" "foo" {
-}
-`,
-	})
-
-	p := testProvider("aws")
-
-	ctx := testContext2(t, &ContextOpts{
-		Config: m,
-		Providers: map[addrs.Provider]providers.Factory{
-			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
-		},
-		State: state,
-	})
-
-	result, diags := ctx.Refresh()
-	if diags.HasErrors() {
-		t.Fatalf("plan errors: %s", diags.Err())
-	}
-
-	expect := strings.TrimSpace(`
-aws_instance.bar:
-  ID = bar
-  provider = provider["registry.terraform.io/hashicorp/aws"]
-  foo = foo
-
-  Dependencies:
-    aws_instance.foo
-aws_instance.baz:
-  ID = baz
-  provider = provider["registry.terraform.io/hashicorp/aws"]
-
-  Dependencies:
-    aws_instance.bam
-aws_instance.foo:
-  ID = foo
-  provider = provider["registry.terraform.io/hashicorp/aws"]
-`)
-
-	checkStateString(t, result, expect)
-}

// verify that create_before_destroy is updated in the state during refresh
func TestRefresh_updateLifecycle(t *testing.T) {
	state := states.NewState()

View File

@@ -267,7 +267,7 @@ const (
//
// targetState determines which context state we're writing to during plan. The
// default is the global working state.
-func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContext, obj *states.ResourceInstanceObject, dependencies []addrs.ConfigResource, targetState phaseState) error {
+func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContext, obj *states.ResourceInstanceObject, targetState phaseState) error {
	absAddr := n.Addr
	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
	if err != nil {
@@ -290,12 +290,6 @@ func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContex
		return nil
	}

-	// store the new deps in the state.
-	// We check for nil here because don't want to override existing dependencies on orphaned nodes.
-	if dependencies != nil {
-		obj.Dependencies = dependencies
-	}

	if providerSchema == nil {
		// Should never happen, unless our state object is nil
		panic("writeResourceInstanceState used with nil ProviderSchema")
@@ -526,8 +520,6 @@ func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, state *states.Re
	ret := state.DeepCopy()
	ret.Value = resp.NewState
	ret.Private = resp.Private
-	ret.Dependencies = state.Dependencies
-	ret.CreateBeforeDestroy = state.CreateBeforeDestroy

	// Call post-refresh hook
	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {

View File

@@ -145,7 +145,7 @@ func TestNodeAbstractResourceInstance_WriteResourceInstanceState(t *testing.T) {
	ctx.ProviderProvider = mockProvider
	ctx.ProviderSchemaSchema = mockProvider.ProviderSchema()

-	err := node.writeResourceInstanceState(ctx, obj, nil, workingState)
+	err := node.writeResourceInstanceState(ctx, obj, workingState)
	if err != nil {
		t.Fatalf("unexpected error: %s", err.Error())
	}

View File

@@ -160,8 +160,7 @@ func (n *NodeApplyableResourceInstance) dataResourceExecute(ctx EvalContext) (di
		return diags
	}

-	// We don't write dependencies for datasources
-	diags = diags.Append(n.writeResourceInstanceState(ctx, state, nil, workingState))
+	diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState))
	if diags.HasErrors() {
		return diags
	}
@@ -276,7 +275,11 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
	state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, diags.Err())

-	err = n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState)
+	if state != nil {
+		// dependencies are always updated to match the configuration during apply
+		state.Dependencies = n.Dependencies
+	}
+	err = n.writeResourceInstanceState(ctx, state, workingState)
	if err != nil {
		return diags.Append(err)
	}
@@ -289,7 +292,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
	state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, diags.Err())

-	err = n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState)
+	err = n.writeResourceInstanceState(ctx, state, workingState)
	if err != nil {
		return diags.Append(err)
	}

View File

@@ -215,7 +215,7 @@ func (n *NodeDestroyResourceInstance) managedResourceExecute(ctx EvalContext) (d
	// we don't return immediately here on error, so that the state can be
	// finalized
-	err = n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState)
+	err = n.writeResourceInstanceState(ctx, state, workingState)
	if err != nil {
		return diags.Append(err)
	}

View File

@@ -3,6 +3,7 @@ package terraform
import (
	"fmt"
	"log"
+	"sort"

	"github.com/hashicorp/terraform/plans"
	"github.com/hashicorp/terraform/states"
@@ -78,11 +79,11 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di
	// write the data source into both the refresh state and the
	// working state
-	diags = diags.Append(n.writeResourceInstanceState(ctx, state, nil, refreshState))
+	diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState))
	if diags.HasErrors() {
		return diags
	}

-	diags = diags.Append(n.writeResourceInstanceState(ctx, state, nil, workingState))
+	diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState))
	if diags.HasErrors() {
		return diags
	}
@@ -133,9 +134,19 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
	if diags.HasErrors() {
		return diags
	}

	instanceRefreshState = s

-	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, n.Dependencies, refreshState))
+	if instanceRefreshState != nil {
+		// When refreshing we start by merging the stored dependencies and
+		// the configured dependencies. The configured dependencies will be
+		// stored to state once the changes are applied. If the plan
+		// results in no changes, we will re-write these dependencies
+		// below.
+		instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies)
+	}
+	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
	if diags.HasErrors() {
		return diags
	}
@@ -153,11 +164,74 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
		return diags
	}

-	diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, n.Dependencies, workingState))
+	diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState))
	if diags.HasErrors() {
		return diags
	}

+	// If this plan resulted in a NoOp, then apply won't have a chance to make
+	// any changes to the stored dependencies. Since this is a NoOp we know
+	// that the stored dependencies will have no effect during apply, and we can
+	// write them out now.
+	if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) {
+		// the refresh state will be the final state for this resource, so
+		// finalize the dependencies here if they need to be updated.
+		instanceRefreshState.Dependencies = n.Dependencies
+		diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
+		if diags.HasErrors() {
+			return diags
+		}
+	}
+
	diags = diags.Append(n.writeChange(ctx, change, ""))

	return diags
}

+// mergeDeps returns the union of 2 sets of dependencies
+func mergeDeps(a, b []addrs.ConfigResource) []addrs.ConfigResource {
+	switch {
+	case len(a) == 0:
+		return b
+	case len(b) == 0:
+		return a
+	}
+
+	set := make(map[string]addrs.ConfigResource)
+
+	for _, dep := range a {
+		set[dep.String()] = dep
+	}
+
+	for _, dep := range b {
+		set[dep.String()] = dep
+	}
+
+	newDeps := make([]addrs.ConfigResource, 0, len(set))
+	for _, dep := range set {
+		newDeps = append(newDeps, dep)
+	}
+
+	return newDeps
+}
+
+func depsEqual(a, b []addrs.ConfigResource) bool {
+	if len(a) != len(b) {
+		return false
+	}
+
+	less := func(s []addrs.ConfigResource) func(i, j int) bool {
+		return func(i, j int) bool {
+			return s[i].String() < s[j].String()
+		}
+	}
+	sort.Slice(a, less(a))
+	sort.Slice(b, less(b))
+
+	for i := range a {
+		if !a[i].Equal(b[i]) {
+			return false
+		}
+	}
+	return true
+}
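
As a quick illustration of the semantics of the two helpers added above, here is a standalone sketch; it models dependencies as plain strings rather than addrs.ConfigResource, so the names and types are stand-ins, not Terraform's internal API:

package main

import (
	"fmt"
	"sort"
)

// union mirrors the behavior of mergeDeps for string keys: every
// dependency from either set appears exactly once in the result.
func union(a, b []string) []string {
	set := make(map[string]struct{})
	for _, dep := range a {
		set[dep] = struct{}{}
	}
	for _, dep := range b {
		set[dep] = struct{}{}
	}
	merged := make([]string, 0, len(set))
	for dep := range set {
		merged = append(merged, dep)
	}
	sort.Strings(merged) // map iteration order is random; sort for stable output
	return merged
}

// equalSets mirrors depsEqual: equality is order-insensitive, and the
// arguments are sorted in place to allow element-wise comparison.
func equalSets(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	sort.Strings(a)
	sort.Strings(b)
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

func main() {
	stored := []string{"aws_instance.baz"}     // dependencies already in state
	configured := []string{"aws_instance.foo"} // dependencies from the config

	// During refresh the two sets are merged into the refresh state...
	fmt.Println(union(stored, configured)) // [aws_instance.baz aws_instance.foo]

	// ...and a NoOp plan rewrites the stored set only when it differs
	// from the configured one.
	fmt.Println(equalSets(stored, configured)) // false
}

Note that depsEqual, like the sketch, sorts its arguments in place; that appears acceptable here because the ordering of the dependency slices is not otherwise significant.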

View File

@@ -99,7 +99,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon
		return diags
	}

-	diags = diags.Append(n.writeResourceInstanceState(ctx, state, n.Dependencies, refreshState))
+	diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState))
	if diags.HasErrors() {
		return diags
	}
@@ -121,6 +121,6 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon
		return diags
	}

-	diags = diags.Append(n.writeResourceInstanceState(ctx, nil, n.Dependencies, workingState))
+	diags = diags.Append(n.writeResourceInstanceState(ctx, nil, workingState))
	return diags
}

View File

@@ -281,6 +281,6 @@ func (n *graphNodeImportStateSub) Execute(ctx EvalContext, op walkOperation) (di
		return diags
	}

-	diags = diags.Append(riNode.writeResourceInstanceState(ctx, state, nil, workingState))
+	diags = diags.Append(riNode.writeResourceInstanceState(ctx, state, workingState))
	return diags
}