Merge pull request #27503 from hashicorp/jbardin/instance-apply-refactor

Fix error handling in instance apply methods
James Bardin 2021-01-13 16:29:42 -05:00 committed by GitHub
commit 40b3f51576
5 changed files with 205 additions and 127 deletions
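
The thread running through all five files: the instance apply methods stop passing a separate error (or *error) value alongside tfdiags.Diagnostics and instead fold every failure into the diagnostics, which callers read back with diags.Err(). A minimal standalone sketch of that pattern, using the real tfdiags API but an illustrative function:

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

// applySketch stands in for the refactored methods: failures and warnings
// are both appended to a single tfdiags.Diagnostics value instead of being
// returned as a separate error.
func applySketch() tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics
	diags = diags.Append(tfdiags.SimpleWarning("warned"))
	diags = diags.Append(errors.New("oops"))
	return diags
}

func main() {
	diags := applySketch()
	fmt.Println(diags.HasErrors())       // true
	fmt.Println(diags.Err())             // error-level view; nil when there are no errors
	fmt.Println(diags.ErrWithWarnings()) // flattens errors and warnings together
}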


@@ -12510,3 +12510,115 @@ func TestContext2Apply_errorRestorePrivateData(t *testing.T) {
 		t.Fatal("missing private data in state")
 	}
 }
+
+func TestContext2Apply_errorRestoreStatus(t *testing.T) {
+	// empty config to remove our resource
+	m := testModuleInline(t, map[string]string{
+		"main.tf": "",
+	})
+
+	p := simpleMockProvider()
+	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
+		// We error during apply, but return the current object state.
+		resp.Diagnostics = resp.Diagnostics.Append(errors.New("oops"))
+		// return a warning too to make sure it isn't dropped
+		resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("warned"))
+		resp.NewState = req.PriorState
+		resp.Private = req.PlannedPrivate
+		return resp
+	}
+
+	addr := mustResourceInstanceAddr("test_object.a")
+	state := states.BuildState(func(s *states.SyncState) {
+		s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{
+			Status:    states.ObjectTainted,
+			AttrsJSON: []byte(`{"test_string":"foo"}`),
+			Private:   []byte("private"),
+		}, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`))
+	})
+
+	ctx := testContext2(t, &ContextOpts{
+		Config: m,
+		State:  state,
+		Providers: map[addrs.Provider]providers.Factory{
+			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
+		},
+	})
+
+	_, diags := ctx.Plan()
+	if diags.HasErrors() {
+		t.Fatal(diags.Err())
+	}
+
+	state, diags = ctx.Apply()
+	errString := diags.ErrWithWarnings().Error()
+	if !strings.Contains(errString, "oops") || !strings.Contains(errString, "warned") {
+		t.Fatalf("error missing expected info: %q", errString)
+	}
+
+	if len(diags) != 2 {
+		t.Fatalf("expected 1 error and 1 warning, got: %q", errString)
+	}
+
+	res := state.ResourceInstance(addr)
+	if res == nil {
+		t.Fatal("resource was removed from state")
+	}
+
+	if res.Current.Status != states.ObjectTainted {
+		t.Fatal("resource should still be tainted in the state")
+	}
+
+	if string(res.Current.Private) != "private" {
+		t.Fatalf("incorrect private data, got %q", res.Current.Private)
+	}
+}
+
+func TestContext2Apply_nonConformingResponse(t *testing.T) {
+	// config to create our resource
+	m := testModuleInline(t, map[string]string{
+		"main.tf": `
+resource "test_object" "a" {
+  test_string = "x"
+}
+`,
+	})
+
+	p := simpleMockProvider()
+	respDiags := tfdiags.Diagnostics(nil).Append(tfdiags.SimpleWarning("warned"))
+	respDiags = respDiags.Append(errors.New("oops"))
+	p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{
+		// Don't lose these diagnostics
+		Diagnostics: respDiags,
+		// This state is missing required attributes, and should produce an error
+		NewState: cty.ObjectVal(map[string]cty.Value{
+			"test_string": cty.StringVal("x"),
+		}),
+	}
+
+	ctx := testContext2(t, &ContextOpts{
+		Config: m,
+		Providers: map[addrs.Provider]providers.Factory{
+			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
+		},
+	})
+
+	_, diags := ctx.Plan()
+	if diags.HasErrors() {
+		t.Fatal(diags.Err())
+	}
+
+	_, diags = ctx.Apply()
+	errString := diags.ErrWithWarnings().Error()
+	if !strings.Contains(errString, "oops") || !strings.Contains(errString, "warned") {
+		t.Fatalf("error missing expected info: %q", errString)
+	}
+
+	// we should have more diagnostics than the two returned from the provider,
+	// and they should not be coalesced into a single value
+	if len(diags) < 3 {
+		t.Fatalf("incorrect diagnostics, got %d values with %s", len(diags), diags.ErrWithWarnings())
+	}
+}
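
TestContext2Apply_nonConformingResponse relies on Terraform rejecting a provider response that does not conform to the resource schema. At its core that check is a cty type conversion; here is a standalone sketch of the kind of mismatch the test provokes (the attribute names are illustrative, and the real code path reports the failure as an extra error diagnostic rather than a bare error):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// Suppose the resource schema implies an object type with two attributes.
	want := cty.Object(map[string]cty.Type{
		"test_string": cty.String,
		"test_number": cty.Number,
	})

	// The provider responded with an object missing "test_number".
	got := cty.ObjectVal(map[string]cty.Value{
		"test_string": cty.StringVal("x"),
	})

	// Conversion fails because object types require every attribute to be
	// present; Terraform appends the failure as another diagnostic alongside
	// whatever the provider already returned, which is why the test expects
	// at least three diagnostics.
	if _, err := convert.Convert(got, want); err != nil {
		fmt.Println("non-conforming response:", err)
	}
}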


@@ -5,7 +5,6 @@ import (
 	"log"
 	"strings"
 
-	multierror "github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/hcl/v2"
 	"github.com/hashicorp/terraform/addrs"
 	"github.com/hashicorp/terraform/configs"
@@ -233,7 +232,7 @@ func (n *NodeAbstractResourceInstance) preApplyHook(ctx EvalContext, change *pla
 }
 
 // postApplyHook calls the post-Apply hook
-func (n *NodeAbstractResourceInstance) postApplyHook(ctx EvalContext, state *states.ResourceInstanceObject, err *error) tfdiags.Diagnostics {
+func (n *NodeAbstractResourceInstance) postApplyHook(ctx EvalContext, state *states.ResourceInstanceObject, err error) tfdiags.Diagnostics {
 	var diags tfdiags.Diagnostics
 
 	// Only managed resources have user-visible apply actions.
@@ -245,12 +244,10 @@ func (n *NodeAbstractResourceInstance) postApplyHook(ctx EvalContext, state *sta
 			newState = cty.NullVal(cty.DynamicPseudoType)
 		}
 
 		diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
-			return h.PostApply(n.Addr, nil, newState, *err)
+			return h.PostApply(n.Addr, nil, newState, err)
 		}))
 	}
 
-	diags = diags.Append(*err)
-
 	return diags
 }
@@ -1543,33 +1540,29 @@ func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned
 // evalApplyProvisioners determines if provisioners need to be run, and if so
 // executes the provisioners for a resource and returns an updated error if
 // provisioning fails.
-func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, createNew bool, when configs.ProvisionerWhen, applyErr error) (tfdiags.Diagnostics, error) {
+func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, createNew bool, when configs.ProvisionerWhen) tfdiags.Diagnostics {
 	var diags tfdiags.Diagnostics
 
 	if state == nil {
 		log.Printf("[TRACE] evalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr)
-		return nil, applyErr
-	}
-
-	if applyErr != nil {
-		// We're already tainted, so just return out
-		return nil, applyErr
+		return nil
 	}
 
 	if when == configs.ProvisionerWhenCreate && !createNew {
 		// If we're not creating a new resource, then don't run provisioners
 		log.Printf("[TRACE] evalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr)
-		return nil, applyErr
+		return nil
 	}
 
 	if state.Status == states.ObjectTainted {
 		// No point in provisioning an object that is already tainted, since
 		// it's going to get recreated on the next apply anyway.
 		log.Printf("[TRACE] evalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr)
-		return nil, applyErr
+		return nil
 	}
 
 	provs := filterProvisioners(n.Config, when)
 	if len(provs) == 0 {
 		// We have no provisioners, so don't do anything
-		return nil, applyErr
+		return nil
 	}
 
 	// Call pre hook
@@ -1577,23 +1570,22 @@ func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, st
 		return h.PreProvisionInstance(n.Addr, state.Value)
 	}))
 	if diags.HasErrors() {
-		return diags, applyErr
+		return diags
 	}
 
 	// If provisioning fails, we append the error to our diagnostics and
 	// continue at the caller's request.
 	err := n.applyProvisioners(ctx, state, when, provs)
 	if err != nil {
-		applyErr = multierror.Append(applyErr, err)
+		diags = diags.Append(err)
 		log.Printf("[TRACE] evalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", n.Addr)
-		return nil, applyErr
+		return diags
 	}
 
 	// Call post hook
-	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
+	return diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
 		return h.PostProvisionInstance(n.Addr, state.Value)
 	}))
-
-	return diags, applyErr
 }
 
 // filterProvisioners filters the provisioners on the resource to only
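
For reference, the shape of this change in isolation: a provisioner failure used to be folded into the threaded error value with go-multierror, and now it simply becomes one more diagnostic. A standalone sketch of the two patterns side by side (variable names are illustrative):

package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/terraform/tfdiags"
)

func main() {
	provisionErr := errors.New("remote-exec: connection refused")

	// Old pattern: accumulate into a single error value, which then has to
	// be threaded back out through every return signature.
	var applyErr error
	applyErr = multierror.Append(applyErr, provisionErr)
	fmt.Println(applyErr)

	// New pattern: the failure is just another diagnostic; severity is
	// preserved and nothing extra flows through the signatures.
	var diags tfdiags.Diagnostics
	diags = diags.Append(provisionErr)
	fmt.Println(diags.HasErrors(), diags.Err())
}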
@@ -1814,8 +1806,7 @@ func (n *NodeAbstractResourceInstance) apply(
 	state *states.ResourceInstanceObject,
 	change *plans.ResourceInstanceChange,
 	applyConfig *configs.Resource,
-	createBeforeDestroy bool,
-	applyError error) (*states.ResourceInstanceObject, tfdiags.Diagnostics, error) {
+	createBeforeDestroy bool) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
 
 	if state == nil {
@@ -1824,13 +1815,13 @@ func (n *NodeAbstractResourceInstance) apply(
 	provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
 	if err != nil {
-		return nil, diags.Append(err), applyError
+		return nil, diags.Append(err)
 	}
 	schema, _ := providerSchema.SchemaForResourceType(n.Addr.Resource.Resource.Mode, n.Addr.Resource.Resource.Type)
 	if schema == nil {
 		// Should be caught during validation, so we don't bother with a pretty error here
 		diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Resource.Type))
-		return nil, diags, applyError
+		return nil, diags
 	}
 
 	log.Printf("[INFO] Starting apply for %s", n.Addr)
@@ -1843,7 +1834,7 @@ func (n *NodeAbstractResourceInstance) apply(
 		configVal, _, configDiags = ctx.EvaluateBlock(applyConfig.Config, schema, nil, keyData)
 		diags = diags.Append(configDiags)
 		if configDiags.HasErrors() {
-			return nil, diags, applyError
+			return nil, diags
 		}
 	}
@@ -1852,13 +1843,13 @@ func (n *NodeAbstractResourceInstance) apply(
 			"configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)",
 			n.Addr,
 		))
-		return nil, diags, applyError
+		return nil, diags
 	}
 
 	metaConfigVal, metaDiags := n.providerMetas(ctx)
 	diags = diags.Append(metaDiags)
 	if diags.HasErrors() {
-		return nil, diags, applyError
+		return nil, diags
 	}
 
 	log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr, change.Action)
@@ -1870,7 +1861,6 @@ func (n *NodeAbstractResourceInstance) apply(
 	unmarkedBefore, beforePaths := change.Before.UnmarkDeepWithPaths()
 	unmarkedAfter, afterPaths := change.After.UnmarkDeepWithPaths()
 
-	var newState *states.ResourceInstanceObject
 	// If we have an Update action, our before and after values are equal,
 	// and only differ on their sensitivity, the newVal is the after val
 	// and we should not communicate with the provider. We do need to update
@@ -1880,14 +1870,14 @@ func (n *NodeAbstractResourceInstance) apply(
 	eq := eqV.IsKnown() && eqV.True()
 	if change.Action == plans.Update && eq && !marksEqual(beforePaths, afterPaths) {
 		// Copy the previous state, changing only the value
-		newState = &states.ResourceInstanceObject{
+		newState := &states.ResourceInstanceObject{
 			CreateBeforeDestroy: state.CreateBeforeDestroy,
 			Dependencies:        state.Dependencies,
 			Private:             state.Private,
 			Status:              state.Status,
 			Value:               change.After,
 		}
-		return newState, diags, applyError
+		return newState, diags
 	}
 
 	resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{
@@ -1956,7 +1946,7 @@ func (n *NodeAbstractResourceInstance) apply(
 		// Bail early in this particular case, because an object that doesn't
 		// conform to the schema can't be saved in the state anyway -- the
 		// serializer will reject it.
-		return newState, diags, applyError
+		return nil, diags
 	}
 
 	// After this point we have a type-conforming result object and so we
@@ -2072,35 +2062,40 @@ func (n *NodeAbstractResourceInstance) apply(
 		}
 	}
 
-	// Sometimes providers return a null value when an operation fails for some
-	// reason, but we'd rather keep the prior state so that the error can be
-	// corrected on a subsequent run. We must only do this for null new value
-	// though, or else we may discard partial updates the provider was able to
-	// complete.
-	if diags.HasErrors() && newVal.IsNull() {
-		// Otherwise, we'll continue but using the prior state as the new value,
-		// making this effectively a no-op. If the item really _has_ been
-		// deleted then our next refresh will detect that and fix it up.
-		// If change.Action is Create then change.Before will also be null,
-		// which is fine.
-		newState = state.DeepCopy()
-	}
+	switch {
+	case diags.HasErrors() && newVal.IsNull():
+		// Sometimes providers return a null value when an operation fails for
+		// some reason, but we'd rather keep the prior state so that the error
+		// can be corrected on a subsequent run. We must only do this for null
+		// new value though, or else we may discard partial updates the
+		// provider was able to complete. Otherwise, we'll continue using the
+		// prior state as the new value, making this effectively a no-op. If
+		// the item really _has_ been deleted then our next refresh will detect
+		// that and fix it up.
+		return state.DeepCopy(), diags
 
-	if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case
-		newState = &states.ResourceInstanceObject{
+	case diags.HasErrors() && !newVal.IsNull():
+		// if we have an error, make sure we restore the object status in the new state
+		newState := &states.ResourceInstanceObject{
+			Status:              state.Status,
+			Value:               newVal,
+			Private:             resp.Private,
+			CreateBeforeDestroy: createBeforeDestroy,
+		}
+		return newState, diags
+
+	case !newVal.IsNull():
+		// Non error case with a new state
+		newState := &states.ResourceInstanceObject{
 			Status:              states.ObjectReady,
 			Value:               newVal,
 			Private:             resp.Private,
 			CreateBeforeDestroy: createBeforeDestroy,
 		}
-	}
+		return newState, diags
 
-	if diags.HasErrors() {
-		// At this point, if we have an error in diags (and hadn't already returned), we return it as an error and clear the diags.
-		applyError = diags.Err()
-		log.Printf("[DEBUG] %s: apply errored", n.Addr)
-		return newState, nil, applyError
+	default:
+		// Non error case, where the object was deleted
+		return nil, diags
 	}
-
-	return newState, diags, applyError
 }
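
The rewritten tail of apply reduces to a four-way decision on two booleans: whether the diagnostics contain an error, and whether the provider returned a null new value. A condensed, runnable sketch of that decision table (strings stand in for the real states.ResourceInstanceObject values):

package main

import "fmt"

// finalState condenses the switch at the end of apply: the pair
// (hasErrors, newValIsNull) picks what lands in the state.
func finalState(hasErrors, newValIsNull bool) string {
	switch {
	case hasErrors && newValIsNull:
		// keep the prior state so the error can be corrected on a later run
		return "prior state (deep copy)"
	case hasErrors && !newValIsNull:
		// partial result: keep the new value but restore the prior status,
		// so a tainted object stays tainted (what the new test asserts)
		return "new value, prior status"
	case !newValIsNull:
		// clean apply with a resulting object
		return "new value, status ObjectReady"
	default:
		// no error and a null value: the object was deleted
		return "nil (removed from state)"
	}
}

func main() {
	for _, hasErrors := range []bool{true, false} {
		for _, isNull := range []bool{true, false} {
			fmt.Printf("errors=%-5v null=%-5v -> %s\n", hasErrors, isNull, finalState(hasErrors, isNull))
		}
	}
}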


@@ -264,39 +264,37 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
 		return diags
 	}
 
-	var applyError error
-	state, applyDiags, applyError := n.apply(ctx, state, diffApply, n.Config, n.CreateBeforeDestroy(), applyError)
+	state, applyDiags := n.apply(ctx, state, diffApply, n.Config, n.CreateBeforeDestroy())
 	diags = diags.Append(applyDiags)
-	if diags.HasErrors() {
-		return diags
-	}
 
 	// We clear the change out here so that future nodes don't see a change
 	// that is already complete.
-	diags = diags.Append(n.writeChange(ctx, nil, ""))
-
-	state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, applyError)
-
-	diags = diags.Append(n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState))
-	if diags.HasErrors() {
-		return diags
+	err = n.writeChange(ctx, nil, "")
+	if err != nil {
+		return diags.Append(err)
 	}
 
+	state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, diags.Err())
+
+	err = n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState)
+	if err != nil {
+		return diags.Append(err)
+	}
+
 	// Run Provisioners
 	createNew := (diffApply.Action == plans.Create || diffApply.Action.IsReplace())
-	applyProvisionersDiags, applyError := n.evalApplyProvisioners(ctx, state, createNew, configs.ProvisionerWhenCreate, applyError)
+	applyProvisionersDiags := n.evalApplyProvisioners(ctx, state, createNew, configs.ProvisionerWhenCreate)
+	// the provisioner errors count as part of the apply error, so we can bundle the diags
 	diags = diags.Append(applyProvisionersDiags)
-	if diags.HasErrors() {
-		return diags
-	}
 
-	state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, applyError)
-
-	diags = diags.Append(n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState))
-	if diags.HasErrors() {
-		return diags
+	state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, diags.Err())
+
+	err = n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState)
+	if err != nil {
+		return diags.Append(err)
 	}
 
-	if createBeforeDestroyEnabled && applyError != nil {
+	if createBeforeDestroyEnabled && diags.HasErrors() {
 		if deposedKey == states.NotDeposed {
 			// This should never happen, and so it always indicates a bug.
 			// We should evaluate this node only if we've previously deposed
@@ -329,15 +327,8 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
 			}
 		}
 	}
 
-	if diags.HasErrors() {
-		return diags
-	}
-
-	diags = diags.Append(n.postApplyHook(ctx, state, &applyError))
-	if diags.HasErrors() {
-		return diags
-	}
-
+	diags = diags.Append(n.postApplyHook(ctx, state, diags.Err()))
 	diags = diags.Append(updateStateHook(ctx))
 	return diags
 }
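
maybeTainted now derives its error from diags.Err() instead of the separately tracked applyError. A simplified sketch of what that helper does, condensed from the real function in this file (the actual version also takes the instance address and logs its decisions):

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/plans"
	"github.com/hashicorp/terraform/states"
)

// maybeTaintedSketch: if the apply step reported an error while the object
// was being created, mark it tainted so the next plan replaces it.
func maybeTaintedSketch(state *states.ResourceInstanceObject, change *plans.ResourceInstanceChange, err error) *states.ResourceInstanceObject {
	if state == nil || change == nil || err == nil {
		return state
	}
	if state.Status == states.ObjectTainted {
		return state // already tainted, nothing to do
	}
	if change.Action == plans.Create {
		// a failed create leaves the remote object in an undefined state
		return state.AsTainted()
	}
	return state
}

func main() {
	obj := &states.ResourceInstanceObject{Status: states.ObjectReady}
	change := &plans.ResourceInstanceChange{Change: plans.Change{Action: plans.Create}}
	obj = maybeTaintedSketch(obj, change, errors.New("apply failed"))
	fmt.Println(obj.Status == states.ObjectTainted) // true
}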


@@ -135,7 +135,6 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation)
 	// These vars are updated through pointers at various stages below.
 	var changeApply *plans.ResourceInstanceChange
 	var state *states.ResourceInstanceObject
-	var provisionerErr error
 
 	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
 	diags = diags.Append(err)
@@ -174,35 +173,30 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation)
 	// Run destroy provisioners if not tainted
 	if state != nil && state.Status != states.ObjectTainted {
-		var applyProvisionersDiags tfdiags.Diagnostics
-		applyProvisionersDiags, provisionerErr = n.evalApplyProvisioners(ctx, state, false, configs.ProvisionerWhenDestroy, provisionerErr)
+		applyProvisionersDiags := n.evalApplyProvisioners(ctx, state, false, configs.ProvisionerWhenDestroy)
 		diags = diags.Append(applyProvisionersDiags)
 
+		// keep the diags separate from the main set until we handle the cleanup
 		if diags.HasErrors() {
-			return diags
-		}
-
-		if provisionerErr != nil {
 			// If we have a provisioning error, then we just call
 			// the post-apply hook now.
-			diags = diags.Append(n.postApplyHook(ctx, state, &provisionerErr))
-			if diags.HasErrors() {
-				return diags
-			}
+			diags = diags.Append(n.postApplyHook(ctx, state, diags.Err()))
 			return diags
 		}
 	}
 
 	// Managed resources need to be destroyed, while data sources
 	// are only removed from state.
 	if addr.Resource.Resource.Mode == addrs.ManagedResourceMode {
-		var applyDiags tfdiags.Diagnostics
 		// we pass a nil configuration to apply because we are destroying
-		state, applyDiags, provisionerErr = n.apply(ctx, state, changeApply, nil, false, provisionerErr)
-		diags.Append(applyDiags)
-		if diags.HasErrors() {
-			return diags
-		}
-
-		diags = diags.Append(n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState))
-		if diags.HasErrors() {
-			return diags
+		s, d := n.apply(ctx, state, changeApply, nil, false)
+		state, diags = s, diags.Append(d)
+		// we don't return immediately here on error, so that the state can be
+		// finalized
+
+		err := n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState)
+		if err != nil {
+			return diags.Append(err)
 		}
 	} else {
 		log.Printf("[TRACE] NodeDestroyResourceInstance: removing state object for %s", n.Addr)
@@ -210,11 +204,8 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation)
 		state.SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider)
 	}
 
-	diags = diags.Append(n.postApplyHook(ctx, state, &provisionerErr))
-	if diags.HasErrors() {
-		return diags
-	}
-
+	// create the err value for postApplyHook
+	diags = diags.Append(n.postApplyHook(ctx, state, diags.Err()))
 	diags = diags.Append(updateStateHook(ctx))
 	return diags
 }
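
Both destroy paths now hand postApplyHook the result of diags.Err(). That works because Err filters to error-severity diagnostics and returns nil when there are none, so the hook learns about success or failure from a single source of truth. A small sketch:

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

func main() {
	var diags tfdiags.Diagnostics

	// With no error diagnostics, Err returns nil, so the post-apply hook
	// records a successful destroy.
	fmt.Println(diags.Err() == nil) // true

	// Once an error is appended, Err returns a non-nil error summarizing it,
	// and the same call site reports the failure; no separate error variable
	// has to be kept in sync with the diagnostics.
	diags = diags.Append(errors.New("destroy failed"))
	fmt.Println(diags.Err() == nil) // false
}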


@@ -152,13 +152,11 @@ func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v b
 // GraphNodeExecutable impl.
 func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
 	var change *plans.ResourceInstanceChange
-	var applyError error
 
 	// Read the state for the deposed resource instance
 	state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey)
-	diags = diags.Append(err)
-	if diags.HasErrors() {
-		return diags
+	if err != nil {
+		return diags.Append(err)
 	}
 
 	change, destroyPlanDiags := n.planDestroy(ctx, state, n.DeposedKey)
@@ -174,28 +172,19 @@ func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op w
 	}
 
 	// we pass a nil configuration to apply because we are destroying
-	state, applyDiags, applyError := n.apply(ctx, state, change, nil, false, applyError)
+	state, applyDiags := n.apply(ctx, state, change, nil, false)
 	diags = diags.Append(applyDiags)
-	if diags.HasErrors() {
-		return diags
-	}
+	// don't return immediately on errors, we need to handle the state
 
 	// Always write the resource back to the state deposed. If it
 	// was successfully destroyed it will be pruned. If it was not, it will
 	// be caught on the next run.
-	diags = diags.Append(n.writeResourceInstanceState(ctx, state))
-	if diags.HasErrors() {
-		return diags
+	err = n.writeResourceInstanceState(ctx, state)
+	if err != nil {
+		return diags.Append(err)
 	}
 
-	diags = diags.Append(n.postApplyHook(ctx, state, &applyError))
-	if diags.HasErrors() {
-		return diags
-	}
-
-	if applyError != nil {
-		return diags.Append(applyError)
-	}
-
-	diags = diags.Append(updateStateHook(ctx))
-	return diags
+	diags = diags.Append(n.postApplyHook(ctx, state, diags.Err()))
+	return diags.Append(updateStateHook(ctx))
 }