Staticcheck fixes

Fixes within the terraform package to remove staticcheck errors
Pam Selle 2021-01-06 14:18:36 -05:00
parent 0a31fa0941
commit fb5f7b9a59
9 changed files with 34 additions and 35 deletions
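
The diff below addresses two recurring staticcheck findings: functions that returned an error anywhere but the last position now return it last, with every caller updated to unpack values in the new order, and error strings that were capitalized or ended with a period are lowercased, with the affected test regexp adjusted to match. A minimal sketch of the return-order change, using hypothetical names rather than the real Terraform signatures:

package main

import (
	"errors"
	"fmt"
)

// diagnostics stands in for tfdiags.Diagnostics; doStep is a hypothetical helper.
type diagnostics []string

// Before this kind of fix the signature would read
// func doStep() (error, diagnostics); staticcheck wants the error value last.
func doStep() (diagnostics, error) {
	return diagnostics{"note: example diagnostic"}, errors.New("example failure")
}

func main() {
	diags, err := doStep() // callers now unpack diagnostics first, error last
	fmt.Println(diags, err)
}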

View File

@@ -1094,7 +1094,7 @@ func TestContext2Refresh_unknownProvider(t *testing.T) {
t.Fatal("successfully created context; want error")
}
-if !regexp.MustCompile(`Failed to instantiate provider ".+"`).MatchString(diags.Err().Error()) {
+if !regexp.MustCompile(`failed to instantiate provider ".+"`).MatchString(diags.Err().Error()) {
t.Fatalf("wrong error: %s", diags.Err())
}
}
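
The regexp in this test changes because the underlying error message, produced by the schema-loading code later in this commit, is now lowercase: staticcheck flags error strings that start with a capital letter or end with punctuation, since they are usually embedded in larger messages. A small standalone sketch of the updated check, using a made-up cause error rather than a real provider failure:

package main

import (
	"errors"
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical reconstruction of the message format the test now expects.
	cause := errors.New("plugin binary not found")
	err := fmt.Errorf("failed to instantiate provider %q to obtain schema: %s", "example", cause)

	if !regexp.MustCompile(`failed to instantiate provider ".+"`).MatchString(err.Error()) {
		fmt.Println("regexp no longer matches the error message")
		return
	}
	fmt.Println("regexp matches:", err)
}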

View File

@@ -1536,33 +1536,33 @@ func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned
// evalApplyProvisioners determines if provisioners need to be run, and if so
// executes the provisioners for a resource and returns an updated error if
// provisioning fails.
-func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, createNew bool, when configs.ProvisionerWhen, applyErr error) (error, tfdiags.Diagnostics) {
+func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, createNew bool, when configs.ProvisionerWhen, applyErr error) (tfdiags.Diagnostics, error) {
var diags tfdiags.Diagnostics
if state == nil {
log.Printf("[TRACE] evalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr)
-return applyErr, nil
+return nil, applyErr
}
if applyErr != nil {
// We're already tainted, so just return out
-return applyErr, nil
+return nil, applyErr
}
if when == configs.ProvisionerWhenCreate && !createNew {
// If we're not creating a new resource, then don't run provisioners
log.Printf("[TRACE] evalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr)
-return applyErr, nil
+return nil, applyErr
}
if state.Status == states.ObjectTainted {
// No point in provisioning an object that is already tainted, since
// it's going to get recreated on the next apply anyway.
log.Printf("[TRACE] evalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr)
-return applyErr, nil
+return nil, applyErr
}
provs := filterProvisioners(n.Config, when)
if len(provs) == 0 {
// We have no provisioners, so don't do anything
-return applyErr, nil
+return nil, applyErr
}
// Call pre hook
@@ -1570,7 +1570,7 @@ func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, st
return h.PreProvisionInstance(n.Addr, state.Value)
}))
if diags.HasErrors() {
-return applyErr, diags
+return diags, applyErr
}
// If there are no errors, then we append it to our output error
@@ -1579,14 +1579,14 @@ func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, st
if err != nil {
applyErr = multierror.Append(applyErr, err)
log.Printf("[TRACE] evalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", n.Addr)
-return applyErr, nil
+return nil, applyErr
}
// Call post hook
diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostProvisionInstance(n.Addr, state.Value)
}))
-return applyErr, diags
+return diags, applyErr
}
// filterProvisioners filters the provisioners on the resource to only
@@ -1808,7 +1808,7 @@ func (n *NodeAbstractResourceInstance) apply(
change *plans.ResourceInstanceChange,
applyConfig *configs.Resource,
createBeforeDestroy bool,
-applyError error) (*states.ResourceInstanceObject, error, tfdiags.Diagnostics) {
+applyError error) (*states.ResourceInstanceObject, tfdiags.Diagnostics, error) {
var diags tfdiags.Diagnostics
if state == nil {
@@ -1817,13 +1817,13 @@ func (n *NodeAbstractResourceInstance) apply(
var newState *states.ResourceInstanceObject
provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
if err != nil {
-return newState, applyError, diags.Append(err)
+return newState, diags.Append(err), applyError
}
schema, _ := providerSchema.SchemaForResourceType(n.Addr.Resource.Resource.Mode, n.Addr.Resource.Resource.Type)
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Resource.Type))
-return newState, applyError, diags
+return newState, diags, applyError
}
log.Printf("[INFO] Starting apply for %s", n.Addr)
@@ -1836,7 +1836,7 @@ func (n *NodeAbstractResourceInstance) apply(
configVal, _, configDiags = ctx.EvaluateBlock(applyConfig.Config, schema, nil, keyData)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
-return newState, applyError, diags
+return newState, diags, applyError
}
}
@@ -1845,13 +1845,13 @@ func (n *NodeAbstractResourceInstance) apply(
"configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)",
n.Addr,
))
-return newState, applyError, diags
+return newState, diags, applyError
}
metaConfigVal, metaDiags := n.providerMetas(ctx)
diags = diags.Append(metaDiags)
if diags.HasErrors() {
-return newState, applyError, diags
+return newState, diags, applyError
}
log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr, change.Action)
@@ -1879,7 +1879,7 @@ func (n *NodeAbstractResourceInstance) apply(
Status: state.Status,
Value: change.After,
}
-return newState, applyError, diags
+return newState, diags, applyError
}
resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{
@@ -1948,7 +1948,7 @@ func (n *NodeAbstractResourceInstance) apply(
// Bail early in this particular case, because an object that doesn't
// conform to the schema can't be saved in the state anyway -- the
// serializer will reject it.
-return newState, applyError, diags
+return newState, diags, applyError
}
// After this point we have a type-conforming result object and so we
@@ -2099,8 +2099,8 @@ func (n *NodeAbstractResourceInstance) apply(
// At this point, if we have an error in diags (and hadn't already returned), we return it as an error and clear the diags.
applyError = diags.Err()
log.Printf("[DEBUG] %s: apply errored", n.Addr)
-return newState, applyError, nil
+return newState, nil, applyError
}
-return newState, applyError, diags
+return newState, diags, applyError
}

View File

@@ -233,7 +233,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
// Make a new diff, in case we've learned new values in the state
// during apply which we can now incorporate.
-diffApply, state, planDiags := n.plan(ctx, diff, state, false)
+diffApply, _, planDiags := n.plan(ctx, diff, state, false)
diags = diags.Append(planDiags)
if diags.HasErrors() {
return diags
@@ -265,7 +265,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
}
var applyError error
-state, applyError, applyDiags := n.apply(ctx, state, diffApply, n.Config, n.CreateBeforeDestroy(), applyError)
+state, applyDiags, applyError := n.apply(ctx, state, diffApply, n.Config, n.CreateBeforeDestroy(), applyError)
diags = diags.Append(applyDiags)
if diags.HasErrors() {
return diags
@@ -283,7 +283,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
}
createNew := (diffApply.Action == plans.Create || diffApply.Action.IsReplace())
-applyError, applyProvisionersDiags := n.evalApplyProvisioners(ctx, state, createNew, configs.ProvisionerWhenCreate, applyError)
+applyProvisionersDiags, applyError := n.evalApplyProvisioners(ctx, state, createNew, configs.ProvisionerWhenCreate, applyError)
diags = diags.Append(applyProvisionersDiags)
if diags.HasErrors() {
return diags
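
One change in this file is unrelated to return ordering: the state value returned by n.plan in the first hunk is never read again, so it is discarded with the blank identifier, the usual way to clear an unused-assignment finding without changing behavior. A tiny sketch with hypothetical names:

package main

import "fmt"

// plan is a stand-in for a method that returns several values.
func plan() (string, string, []string) {
	return "diff", "state", []string{"diag: example"}
}

func main() {
	// Before: diffApply, state, planDiags := plan(); staticcheck reports
	// "state" as assigned and never used.
	diffApply, _, planDiags := plan() // the blank identifier drops the unused value
	fmt.Println(diffApply, planDiags)
}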

View File

@@ -175,7 +175,7 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation)
// Run destroy provisioners if not tainted
if state != nil && state.Status != states.ObjectTainted {
var applyProvisionersDiags tfdiags.Diagnostics
-provisionerErr, applyProvisionersDiags = n.evalApplyProvisioners(ctx, state, false, configs.ProvisionerWhenDestroy, provisionerErr)
+applyProvisionersDiags, provisionerErr = n.evalApplyProvisioners(ctx, state, false, configs.ProvisionerWhenDestroy, provisionerErr)
diags = diags.Append(applyProvisionersDiags)
if diags.HasErrors() {
return diags
@@ -195,7 +195,7 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation)
if addr.Resource.Resource.Mode == addrs.ManagedResourceMode {
var applyDiags tfdiags.Diagnostics
// we pass a nil configuration to apply because we are destroying
-state, provisionerErr, applyDiags = n.apply(ctx, state, changeApply, nil, false, provisionerErr)
+state, applyDiags, provisionerErr = n.apply(ctx, state, changeApply, nil, false, provisionerErr)
diags.Append(applyDiags)
if diags.HasErrors() {
return diags

View File

@@ -174,7 +174,7 @@ func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op w
}
// we pass a nil configuration to apply because we are destroying
-state, applyError, applyDiags := n.apply(ctx, state, change, nil, false, applyError)
+state, applyDiags, applyError := n.apply(ctx, state, change, nil, false, applyError)
diags = diags.Append(applyDiags)
if diags.HasErrors() {
return diags

View File

@@ -106,7 +106,7 @@ func loadProviderSchemas(schemas map[addrs.Provider]*ProviderSchema, config *con
// future calls.
schemas[fqn] = &ProviderSchema{}
diags = diags.Append(
-fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", name, err),
+fmt.Errorf("failed to instantiate provider %q to obtain schema: %s", name, err),
)
return
}
@@ -120,7 +120,7 @@ func loadProviderSchemas(schemas map[addrs.Provider]*ProviderSchema, config *con
// future calls.
schemas[fqn] = &ProviderSchema{}
diags = diags.Append(
-fmt.Errorf("Failed to retrieve schema from provider %q: %s", name, resp.Diagnostics.Err()),
+fmt.Errorf("failed to retrieve schema from provider %q: %s", name, resp.Diagnostics.Err()),
)
return
}
@@ -200,7 +200,7 @@ func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *conf
// future calls.
schemas[name] = &configschema.Block{}
diags = diags.Append(
-fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err),
+fmt.Errorf("failed to instantiate provisioner %q to obtain schema: %s", name, err),
)
return
}
@@ -214,7 +214,7 @@ func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *conf
// future calls.
schemas[name] = &configschema.Block{}
diags = diags.Append(
-fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()),
+fmt.Errorf("failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()),
)
return
}

View File

@@ -22,13 +22,13 @@ func (t *ImportStateTransformer) Transform(g *Graph) error {
// This is only likely to happen in misconfigured tests
if t.Config == nil {
-return fmt.Errorf("Cannot import into an empty configuration.")
+return fmt.Errorf("cannot import into an empty configuration")
}
// Get the module config
modCfg := t.Config.Descendent(target.Addr.Module.Module())
if modCfg == nil {
-return fmt.Errorf("Module %s not found.", target.Addr.Module.Module())
+return fmt.Errorf("module %s not found", target.Addr.Module.Module())
}
providerAddr := addrs.AbsProviderConfig{

View File

@@ -255,8 +255,7 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error {
cpm := make(map[string]*graphNodeCloseProvider)
var err error
-for _, v := range pm {
-p := v.(GraphNodeProvider)
+for _, p := range pm {
key := p.ProviderAddr().String()
// get the close provider of this type if we alread created it
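
The loop above also sheds a per-iteration type assertion: since the new body calls ProviderAddr directly on the range value, the map evidently already holds values of the provider interface type, making the old v.(GraphNodeProvider) assertion redundant. A self-contained sketch under that assumption, with illustrative declarations rather than the real Terraform types:

package main

import "fmt"

// GraphNodeProvider mirrors the shape of the interface used above;
// only ProviderAddr is modelled here.
type GraphNodeProvider interface {
	ProviderAddr() string
}

type providerNode struct{ addr string }

func (n providerNode) ProviderAddr() string { return n.addr }

func main() {
	pm := map[string]GraphNodeProvider{
		"aws": providerNode{addr: `provider["registry.terraform.io/hashicorp/aws"]`},
	}

	// Ranging over the interface-typed values directly removes the
	// v.(GraphNodeProvider) assertion the old loop performed.
	for _, p := range pm {
		key := p.ProviderAddr()
		fmt.Println(key)
	}
}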

View File

@@ -31,7 +31,7 @@ func (t *VertexTransformer) Transform(g *Graph) error {
if ok := g.Replace(v, newV); !ok {
// This should never happen, big problem
return fmt.Errorf(
-"Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v",
+"failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v",
dag.VertexName(v), dag.VertexName(newV), v, newV)
}