core: Treat deposed objects the same as orphaned current objects

In many ways a deposed object is equivalent to an orphaned current object
in that the only action we can take with it is to destroy it. However, we
do still need to take some preparation steps in both cases: first, we must
ensure we track the upgraded version of the existing object so that we'll
be able to successfully render our plan, and second, we must refresh the
existing object to make sure it still exists in the remote system.
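
As a rough standalone sketch of that ordering (the types and helpers below are invented for illustration, not Terraform's real API; the real logic is in NodePlanDeposedResourceInstanceObject.Execute in this change), we upgrade first so the plan can render against the current schema, and only then refresh against the remote system:

package main

import "fmt"

// Hypothetical stand-ins for Terraform's internal types, only to show the
// ordering of the two preparation steps described above.
type object struct {
	attrsJSON     string
	schemaVersion int
}

// upgradeObject is a stand-in for the provider's UpgradeResourceState call:
// it brings the stored object up to the provider's current schema version.
func upgradeObject(o object, currentVersion int) object {
	o.schemaVersion = currentVersion
	return o
}

// refreshObject is a stand-in for the provider's ReadResource call: it
// returns the object's current remote state, or nil if it no longer exists.
func refreshObject(o object) *object {
	o.attrsJSON = `{"arg":"current"}`
	return &o
}

func main() {
	deposed := object{attrsJSON: `{"arg":"before"}`, schemaVersion: 1}

	// Step 1: upgrade. The upgraded (but not yet refreshed) object is what we
	// record as the "previous run" state, so the plan can be rendered against
	// the current schema.
	upgraded := upgradeObject(deposed, 2)

	// Step 2: refresh. The refreshed object is what we record in the refresh
	// state and is what the destroy plan is made against.
	refreshed := refreshObject(upgraded)
	if refreshed == nil {
		fmt.Println("deposed object already gone; nothing to destroy")
		return
	}
	fmt.Printf("previous run state: %+v\n", upgraded)
	fmt.Printf("refreshed state:    %+v\n", *refreshed)
}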

We were previously doing these extra steps for orphan objects but not for
deposed ones, which meant that the behavior for deposed objects would be
subtly different and violate the invariants our callers expect in order
to display a plan. This also created the risk that a deposed object
already deleted in the remote system would become "stuck" because
Terraform would still plan to destroy it, which might cause the provider
to return an error when it tries to delete an already-absent object.
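
Here's a contrived standalone sketch of that failure mode, using invented helpers rather than the real provider API: without a refresh step we'd keep planning a destroy for an object that's already gone, whereas refreshing first lets us simply drop it from state:

package main

import (
	"errors"
	"fmt"
)

// remoteExists is a hypothetical stand-in for asking the remote system
// whether the deposed object still exists; here it was deleted out of band.
func remoteExists(id string) bool { return false }

// deleteRemote is a hypothetical stand-in for the provider's delete call,
// which may fail when asked to delete something that isn't there.
func deleteRemote(id string) error {
	if !remoteExists(id) {
		return errors.New("object not found")
	}
	return nil
}

func main() {
	const id = "deposed-abc123"

	// Without a refresh step we'd plan a destroy unconditionally, and the
	// apply could then fail forever on the already-absent object.
	if err := deleteRemote(id); err != nil {
		fmt.Println("without refresh, apply fails:", err)
	}

	// With a refresh step we notice the object is gone and just remove it
	// from state instead of planning a destroy.
	if !remoteExists(id) {
		fmt.Println("with refresh, deposed object is dropped from state; no destroy planned")
	}
}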

This also makes the deposed object planning take into account the
"skipPlanChanges" flag, which is important to get a correct result in
the "refresh only" planning mode.

It's a shame that we have almost identical code handling both the orphan
and deposed situations, but they differ in that the latter must call
different functions to interact with the deposed rather than the current
objects in the state. Perhaps a later change can improve on this with some
more refactoring, but this commit is already a little more disruptive than
I'd like and so I'm intentionally deferring that for another day.
Martin Atkins 2021-05-12 15:18:25 -07:00
parent 3c8a4e6e05
commit f2adfb6e2a
13 changed files with 315 additions and 29 deletions

View File

@@ -383,8 +383,8 @@ func TestLocal_planDeposedOnly(t *testing.T) {
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
if p.ReadResourceCalled {
t.Fatal("ReadResource should not be called")
if !p.ReadResourceCalled {
t.Fatal("ReadResource should've been called to refresh the deposed object")
}
if run.PlanEmpty {
t.Fatal("plan should not be empty")

View File

@@ -70,7 +70,7 @@ const (
func (h *UiHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (terraform.HookAction, error) {
dispAddr := addr.String()
if gen != states.CurrentGen {
dispAddr = fmt.Sprintf("%s (%s)", dispAddr, gen)
dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, gen)
}
var operation string
@@ -210,9 +210,14 @@ func (h *UiHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation
return terraform.HookActionContinue, nil
}
addrStr := addr.String()
if depKey, ok := gen.(states.DeposedKey); ok {
addrStr = fmt.Sprintf("%s (deposed object %s)", addrStr, depKey)
}
colorized := fmt.Sprintf(
h.view.colorize.Color("[reset][bold]%s: %s after %s%s"),
addr, msg, time.Now().Round(time.Second).Sub(state.Start), stateIdSuffix)
addrStr, msg, time.Now().Round(time.Second).Sub(state.Start), stateIdSuffix)
h.println(colorized)
@@ -252,9 +257,14 @@ func (h *UiHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generatio
stateIdSuffix = fmt.Sprintf(" [%s=%s]", k, v)
}
addrStr := addr.String()
if depKey, ok := gen.(states.DeposedKey); ok {
addrStr = fmt.Sprintf("%s (deposed object %s)", addrStr, depKey)
}
h.println(fmt.Sprintf(
h.view.colorize.Color("[reset][bold]%s: Refreshing state...%s"),
addr, stateIdSuffix))
addrStr, stateIdSuffix))
return terraform.HookActionContinue, nil
}

View File

@@ -184,7 +184,7 @@ func TestUiHookPreApply_destroy(t *testing.T) {
<-uiState.done
result := done(t)
expectedOutput := fmt.Sprintf("test_instance.foo (%s): Destroying... [id=abc123]\n", key)
expectedOutput := fmt.Sprintf("test_instance.foo (deposed object %s): Destroying... [id=abc123]\n", key)
output := result.Stdout()
if output != expectedOutput {
t.Fatalf("Output didn't match.\nExpected: %q\nGiven: %q", expectedOutput, output)

View File

@@ -776,6 +776,15 @@ func (c *Context) refreshOnlyPlan() (*plans.Plan, tfdiags.Diagnostics) {
// We'll safety-check that here so we can return a clear message about it,
// rather than probably just generating confusing output at the UI layer.
if len(plan.Changes.Resources) != 0 {
// Some extra context in the logs in case the user reports this message
// as a bug, as a starting point for debugging.
for _, rc := range plan.Changes.Resources {
if depKey := rc.DeposedKey; depKey == states.NotDeposed {
log.Printf("[DEBUG] Refresh-only plan includes %s change for %s", rc.Action, rc.Addr)
} else {
log.Printf("[DEBUG] Refresh-only plan includes %s change for %s deposed object %s", rc.Action, rc.Addr, depKey)
}
}
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Invalid refresh-only plan",

View File

@@ -861,6 +861,143 @@ func TestContext2Plan_refreshOnlyMode(t *testing.T) {
}
}
func TestContext2Plan_refreshOnlyMode_deposed(t *testing.T) {
addr := mustResourceInstanceAddr("test_object.a")
deposedKey := states.DeposedKey("byebye")
// The configuration, the prior state, and the refresh result intentionally
// have different values for "arg" so we can observe that the
// refresh took effect but the configuration change wasn't considered.
m := testModuleInline(t, map[string]string{
"main.tf": `
resource "test_object" "a" {
arg = "after"
}
output "out" {
value = test_object.a.arg
}
`,
})
state := states.BuildState(func(s *states.SyncState) {
// Note that we're intentionally recording a _deposed_ object here,
// and not including a current object, so a normal (non-refresh)
// plan would normally plan to create a new object _and_ destroy
// the deposed one, but refresh-only mode should prevent that.
s.SetResourceInstanceDeposed(addr, deposedKey, &states.ResourceInstanceObjectSrc{
AttrsJSON: []byte(`{"arg":"before"}`),
Status: states.ObjectReady,
}, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`))
})
p := simpleMockProvider()
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
Provider: providers.Schema{Block: simpleTestSchema()},
ResourceTypes: map[string]providers.Schema{
"test_object": {
Block: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"arg": {Type: cty.String, Optional: true},
},
},
},
},
}
p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse {
newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) {
if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) {
return cty.StringVal("current"), nil
}
return v, nil
})
if err != nil {
// shouldn't get here
t.Fatalf("ReadResourceFn transform failed")
return providers.ReadResourceResponse{}
}
return providers.ReadResourceResponse{
NewState: newVal,
}
}
p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
// We should've been given the prior state JSON as our input to upgrade.
if !bytes.Contains(req.RawStateJSON, []byte("before")) {
t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON)
}
// We'll put something different in "arg" as part of upgrading, just
// so that we can verify below that PrevRunState contains the upgraded
// (but NOT refreshed) version of the object.
resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{
"arg": cty.StringVal("upgraded"),
})
return resp
}
ctx := testContext2(t, &ContextOpts{
Config: m,
State: state,
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
},
PlanMode: plans.RefreshOnlyMode,
})
plan, diags := ctx.Plan()
if diags.HasErrors() {
t.Fatalf("unexpected errors\n%s", diags.Err().Error())
}
if !p.UpgradeResourceStateCalled {
t.Errorf("Provider's UpgradeResourceState wasn't called; should've been")
}
if !p.ReadResourceCalled {
t.Errorf("Provider's ReadResource wasn't called; should've been")
}
if got, want := len(plan.Changes.Resources), 0; got != want {
t.Errorf("plan contains resource changes; want none\n%s", spew.Sdump(plan.Changes.Resources))
}
if instState := plan.PriorState.ResourceInstance(addr); instState == nil {
t.Errorf("%s has no prior state at all after plan", addr)
} else {
if obj := instState.Deposed[deposedKey]; obj == nil {
t.Errorf("%s has no deposed object after plan", addr)
} else if got, want := obj.AttrsJSON, `"current"`; !bytes.Contains(got, []byte(want)) {
// Should've saved the result of refreshing
t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
}
}
if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil {
t.Errorf("%s has no previous run state at all after plan", addr)
} else {
if obj := instState.Deposed[deposedKey]; obj == nil {
t.Errorf("%s has no deposed object in the previous run state", addr)
} else if got, want := obj.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) {
// Should've saved the result of upgrading
t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
}
}
// The output value should also have updated. If not, it's likely that we
// skipped updating the working state to match the refreshed state when we
// were evaluating the resource.
if outChangeSrc := plan.Changes.OutputValue(addrs.RootModuleInstance.OutputValue("out")); outChangeSrc == nil {
t.Errorf("no change planned for output value 'out'")
} else {
outChange, err := outChangeSrc.Decode()
if err != nil {
t.Fatalf("failed to decode output value 'out': %s", err)
}
got := outChange.After
want := cty.UnknownVal(cty.String)
if !want.RawEquals(got) {
t.Errorf("wrong value for output value 'out'\ngot: %#v\nwant: %#v", got, want)
}
}
}
func TestContext2Plan_invalidSensitiveModuleOutput(t *testing.T) {
m := testModuleInline(t, map[string]string{
"child/main.tf": `

View File

@@ -64,6 +64,7 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
return &NodePlanDeposedResourceInstanceObject{
NodeAbstractResourceInstance: a,
DeposedKey: key,
skipRefresh: b.skipRefresh,
}
}

View File

@@ -85,6 +85,9 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
return &NodePlanDeposedResourceInstanceObject{
NodeAbstractResourceInstance: a,
DeposedKey: key,
skipRefresh: b.skipRefresh,
skipPlanChanges: b.skipPlanChanges,
}
}

View File

@@ -271,18 +271,41 @@ const (
// targetState determines which context state we're writing to during plan. The
// default is the global working state.
func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContext, obj *states.ResourceInstanceObject, targetState phaseState) error {
return n.writeResourceInstanceStateImpl(ctx, states.NotDeposed, obj, targetState)
}
func (n *NodeAbstractResourceInstance) writeResourceInstanceStateDeposed(ctx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error {
if deposedKey == states.NotDeposed {
// Bail out to avoid silently doing something other than what the
// caller seems to have intended.
panic("trying to write current state object using writeResourceInstanceStateDeposed")
}
return n.writeResourceInstanceStateImpl(ctx, deposedKey, obj, targetState)
}
// This is the private common body of both writeResourceInstanceState and
// writeResourceInstanceStateDeposed. Don't call it directly; instead, use
// one of the two wrappers to be explicit about which of the instance's
// objects you are intending to write.
func (n *NodeAbstractResourceInstance) writeResourceInstanceStateImpl(ctx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error {
absAddr := n.Addr
_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
if err != nil {
return err
}
logFuncName := "NodeAbstractResouceInstance.writeResourceInstanceState"
if deposedKey == states.NotDeposed {
log.Printf("[TRACE] %s to %s for %s", logFuncName, targetState, absAddr)
} else {
logFuncName = "NodeAbstractResouceInstance.writeResourceInstanceStateDeposed"
log.Printf("[TRACE] %s to %s for %s (deposed key %s)", logFuncName, targetState, absAddr, deposedKey)
}
var state *states.SyncState
switch targetState {
case workingState:
state = ctx.State()
case refreshState:
log.Printf("[TRACE] writeResourceInstanceState: using RefreshState for %s", absAddr)
state = ctx.RefreshState()
case prevRunState:
state = ctx.PrevRunState()
@@ -298,22 +321,36 @@ func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContex
return fmt.Errorf("state of type %s is not applicable to the current operation; this is a bug in Terraform", targetState)
}
// In spite of the name, this function also handles the non-deposed case
// via the writeResourceInstanceState wrapper, by setting deposedKey to
// the NotDeposed value (the zero value of DeposedKey).
var write func(src *states.ResourceInstanceObjectSrc)
if deposedKey == states.NotDeposed {
write = func(src *states.ResourceInstanceObjectSrc) {
state.SetResourceInstanceCurrent(absAddr, src, n.ResolvedProvider)
}
} else {
write = func(src *states.ResourceInstanceObjectSrc) {
state.SetResourceInstanceDeposed(absAddr, deposedKey, src, n.ResolvedProvider)
}
}
if obj == nil || obj.Value.IsNull() {
// No need to encode anything: we'll just write it directly.
state.SetResourceInstanceCurrent(absAddr, nil, n.ResolvedProvider)
log.Printf("[TRACE] writeResourceInstanceState: removing state object for %s", absAddr)
write(nil)
log.Printf("[TRACE] %s: removing state object for %s", logFuncName, absAddr)
return nil
}
if providerSchema == nil {
// Should never happen, unless our state object is nil
panic("writeResourceInstanceState used with nil ProviderSchema")
panic("writeResourceInstanceStateImpl used with nil ProviderSchema")
}
if obj != nil {
log.Printf("[TRACE] writeResourceInstanceState: writing current state object for %s", absAddr)
log.Printf("[TRACE] %s: writing state object for %s", logFuncName, absAddr)
} else {
log.Printf("[TRACE] writeResourceInstanceState: removing current state object for %s", absAddr)
log.Printf("[TRACE] %s: removing state object for %s", logFuncName, absAddr)
}
schema, currentVersion := (*providerSchema).SchemaForResourceAddr(absAddr.ContainingResource().Resource)
@@ -329,7 +366,7 @@ func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContex
return fmt.Errorf("failed to encode %s in state: %s", absAddr, err)
}
state.SetResourceInstanceCurrent(absAddr, src, n.ResolvedProvider)
write(src)
return nil
}
@@ -457,10 +494,14 @@ func (n *NodeAbstractResourceInstance) writeChange(ctx EvalContext, change *plan
}
// refresh does a refresh for a resource
func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, deposedKey states.DeposedKey, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
absAddr := n.Addr
if deposedKey == states.NotDeposed {
log.Printf("[TRACE] NodeAbstractResourceInstance.refresh for %s", absAddr)
} else {
log.Printf("[TRACE] NodeAbstractResourceInstance.refresh for %s (deposed object %s)", absAddr, deposedKey)
}
provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
if err != nil {
return state, diags.Append(err)
@@ -484,9 +525,14 @@ func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, state *states.Re
return state, diags
}
hookGen := states.CurrentGen
if deposedKey != states.NotDeposed {
hookGen = deposedKey
}
// Call pre-refresh hook
diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PreRefresh(absAddr, states.CurrentGen, state.Value)
return h.PreRefresh(absAddr, hookGen, state.Value)
}))
if diags.HasErrors() {
return state, diags
@@ -558,7 +604,7 @@ func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, state *states.Re
// Call post-refresh hook
diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostRefresh(absAddr, states.CurrentGen, priorVal, ret.Value)
return h.PostRefresh(absAddr, hookGen, priorVal, ret.Value)
}))
if diags.HasErrors() {
return ret, diags

View File

@@ -30,6 +30,13 @@ type GraphNodeDeposedResourceInstanceObject interface {
type NodePlanDeposedResourceInstanceObject struct {
*NodeAbstractResourceInstance
DeposedKey states.DeposedKey
// skipRefresh indicates that we should skip refreshing individual instances
skipRefresh bool
// skipPlanChanges indicates we should skip trying to plan change actions
// for any instances.
skipPlanChanges bool
}
var (
@@ -66,6 +73,8 @@ func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference
// GraphNodeEvalable impl.
func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
log.Printf("[TRACE] NodePlanDeposedResourceInstanceObject: planning %s deposed object %s", n.Addr, n.DeposedKey)
// Read the state for the deposed resource instance
state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey)
diags = diags.Append(err)
@@ -73,13 +82,71 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walk
return diags
}
// Note any upgrades that readResourceInstanceState might've done in the
// prevRunState, so that it'll conform to current schema.
diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, prevRunState))
if diags.HasErrors() {
return diags
}
// Also the refreshState, because that should still reflect schema upgrades
// even if not refreshing.
diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, refreshState))
if diags.HasErrors() {
return diags
}
if !n.skipRefresh {
// Refresh this object even though it is going to be destroyed, in
// case it's already been deleted outside of Terraform. If this is a
// normal plan, providers expect a Read request to remove missing
// resources from the plan before apply, and may not handle a missing
// resource during Delete correctly. If this is a simple refresh,
// Terraform is expected to remove the missing resource from the state
// entirely
refreshedState, refreshDiags := n.refresh(ctx, n.DeposedKey, state)
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags
}
diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, refreshedState, refreshState))
if diags.HasErrors() {
return diags
}
// If we refreshed then our subsequent planning should be in terms of
// the new object, not the original object.
state = refreshedState
}
if !n.skipPlanChanges {
var change *plans.ResourceInstanceChange
change, destroyPlanDiags := n.planDestroy(ctx, state, n.DeposedKey)
diags = diags.Append(destroyPlanDiags)
if diags.HasErrors() {
return diags
}
// NOTE: We don't check prevent_destroy for deposed objects, even
// though we would do so here for a "current" object, because
// if we've reached a point where an object is already deposed then
// we've already planned and partially-executed a create_before_destroy
// replace and we would've checked prevent_destroy at that point. We
// now just need to get the deposed object destroyed, because there
// should be a new object already serving as its replacement.
diags = diags.Append(n.writeChange(ctx, change, n.DeposedKey))
if diags.HasErrors() {
return diags
}
diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, nil, workingState))
} else {
// The working state should at least be updated with the result
// of upgrading and refreshing from above.
diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, workingState))
}
return diags
}

View File

@@ -33,6 +33,8 @@ func TestNodePlanDeposedResourceInstanceObject_Execute(t *testing.T) {
}
ctx := &MockEvalContext{
StateState: state.SyncWrapper(),
PrevRunStateState: state.DeepCopy().SyncWrapper(),
RefreshStateState: state.DeepCopy().SyncWrapper(),
ProviderProvider: p,
ProviderSchemaSchema: &ProviderSchema{
ResourceTypes: map[string]*configschema.Block{
@@ -59,16 +61,21 @@ func TestNodePlanDeposedResourceInstanceObject_Execute(t *testing.T) {
DeposedKey: deposedKey,
}
err := node.Execute(ctx, walkPlan)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
change := ctx.Changes().GetResourceInstanceChange(absResource, deposedKey)
if change.ChangeSrc.Action != plans.Delete {
t.Fatalf("delete change not planned")
if !p.UpgradeResourceStateCalled {
t.Errorf("UpgradeResourceState wasn't called; should've been called to upgrade the previous run's object")
}
if !p.ReadResourceCalled {
t.Errorf("ReadResource wasn't called; should've been called to refresh the deposed object")
}
change := ctx.Changes().GetResourceInstanceChange(absResource, deposedKey)
if got, want := change.ChangeSrc.Action, plans.Delete; got != want {
t.Fatalf("wrong planned action\ngot: %s\nwant: %s", got, want)
}
}
func TestNodeDestroyDeposedResourceInstanceObject_Execute(t *testing.T) {

View File

@@ -168,7 +168,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
// Refresh, maybe
if !n.skipRefresh {
s, refreshDiags := n.refresh(ctx, instanceRefreshState)
s, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState)
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags

View File

@@ -6,6 +6,7 @@ import (
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/tfdiags"
)
@@ -104,7 +105,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon
// plan before apply, and may not handle a missing resource during
// Delete correctly. If this is a simple refresh, Terraform is
// expected to remove the missing resource from the state entirely
refreshedState, refreshDiags := n.refresh(ctx, oldState)
refreshedState, refreshDiags := n.refresh(ctx, states.NotDeposed, oldState)
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags
@@ -139,6 +140,10 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon
}
diags = diags.Append(n.writeResourceInstanceState(ctx, nil, workingState))
} else {
// The working state should at least be updated with the result
// of upgrading and refreshing from above.
diags = diags.Append(n.writeResourceInstanceState(ctx, oldState, workingState))
}
return diags

View File

@@ -7,6 +7,7 @@ import (
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/providers"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/tfdiags"
)
@@ -275,7 +276,7 @@ func (n *graphNodeImportStateSub) Execute(ctx EvalContext, op walkOperation) (di
ResolvedProvider: n.ResolvedProvider,
},
}
state, refreshDiags := riNode.refresh(ctx, state)
state, refreshDiags := riNode.refresh(ctx, states.NotDeposed, state)
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags