configupgrade: Analysis of input configuration

To properly migrate the contents of resource, data, provider and
provisioner blocks we need the relevant provider's schema so that we
can understand what is expected and resolve some ambiguities inherent
in the legacy HCL AST.

This also includes an initial prototype of migrating the content of
resource blocks, just to verify that the information is being gathered
correctly. As with the rest of upgrade_native.go, this will be
restructured significantly once the basic end-to-end flow is
established and we can see how best to organize this code.
Author: Martin Atkins, 2018-06-28 17:26:06 -07:00
Parent: ccd90bcf35
Commit: adb88eaa16
7 changed files with 411 additions and 34 deletions
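
For orientation, here is a minimal sketch of how a caller wires up the new
Upgrader type introduced below, mirroring the command and test changes in
this commit. The import paths, the module directory, and the use of the
built-in "test" provider are illustrative assumptions; the real command
supplies its own provider resolver and provisioner factories.

package main

import (
	"log"

	testprovider "github.com/hashicorp/terraform/builtin/providers/test"
	"github.com/hashicorp/terraform/configupgrade"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// Load the module's source files from a directory (path is hypothetical).
	sources, err := configupgrade.LoadModule("./old-module")
	if err != nil {
		log.Fatal(err)
	}

	// The upgrader needs a provider resolver so that the analysis step can
	// load provider schemas; a fixed resolver with the built-in "test"
	// provider is used here, as in the package's own tests.
	upgrader := &configupgrade.Upgrader{
		Providers: terraform.ResourceProviderResolverFixed(
			map[string]terraform.ResourceProviderFactory{
				"test": terraform.ResourceProviderFactory(func() (terraform.ResourceProvider, error) {
					return testprovider.Provider(), nil
				}),
			},
		),
	}

	newSources, diags := upgrader.Upgrade(sources)
	if diags.HasErrors() {
		log.Fatal(diags.Err())
	}
	_ = newSources // a real caller would write the upgraded sources back to disk
}
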


@ -145,7 +145,11 @@ command and dealing with them before running this command again.
c.Ui.Output(`-----------------------------------------------------------------------------`)
}
newSources, upgradeDiags := configupgrade.Upgrade(sources)
upgrader := &configupgrade.Upgrader{
Providers: c.providerResolver(),
Provisioners: c.provisionerFactories(),
}
newSources, upgradeDiags := upgrader.Upgrade(sources)
diags = diags.Append(upgradeDiags)
if upgradeDiags.HasErrors() {
c.showDiagnostics(diags)


@ -0,0 +1,207 @@
package configupgrade
import (
"fmt"
hcl1 "github.com/hashicorp/hcl"
hcl1ast "github.com/hashicorp/hcl/hcl/ast"
hcl1parser "github.com/hashicorp/hcl/hcl/parser"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/moduledeps"
"github.com/hashicorp/terraform/plugin/discovery"
"github.com/hashicorp/terraform/terraform"
)
// analysis is a container for the various pieces of information gathered
// by Upgrader.analyze.
type analysis struct {
ProviderSchemas map[string]*terraform.ProviderSchema
ProvisionerSchemas map[string]*configschema.Block
ResourceProviderType map[addrs.Resource]string
ResourceHasCount map[addrs.Resource]bool
}
// analyze processes the configuration files in the given module sources
// and returns an assortment of information required to make decisions
// during a configuration upgrade.
func (u *Upgrader) analyze(ms ModuleSources) (*analysis, error) {
ret := &analysis{
ProviderSchemas: make(map[string]*terraform.ProviderSchema),
ProvisionerSchemas: make(map[string]*configschema.Block),
ResourceProviderType: make(map[addrs.Resource]string),
ResourceHasCount: make(map[addrs.Resource]bool),
}
m := &moduledeps.Module{
Providers: make(moduledeps.Providers),
}
// This is heavily based on terraform.ModuleTreeDependencies but
// differs in that it works directly with the HCL1 AST rather than
// the legacy config structs (and can thus outlive those) and that
// it only works on one module at a time, and so doesn't need to
// recurse into child calls.
for name, src := range ms {
if ext := fileExt(name); ext != ".tf" {
continue
}
f, err := hcl1parser.Parse(src)
if err != nil {
// If we encounter a syntax error then we'll just skip this file for
// now, assuming we'll catch the same error again when we do the
// upgrade itself. Returning the error here would also break the
// upgrade step that renames .tf files to .tf.json when they turn out
// to contain JSON syntax.
continue
}
list, ok := f.Node.(*hcl1ast.ObjectList)
if !ok {
return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
}
if providersList := list.Filter("provider"); len(providersList.Items) > 0 {
providerObjs := providersList.Children()
for _, providerObj := range providerObjs.Items {
if len(providerObj.Keys) != 1 {
return nil, fmt.Errorf("provider block has wrong number of labels")
}
name := providerObj.Keys[0].Token.Value().(string)
var listVal *hcl1ast.ObjectList
if ot, ok := providerObj.Val.(*hcl1ast.ObjectType); ok {
listVal = ot.List
} else {
return nil, fmt.Errorf("provider %q: must be a block", name)
}
var versionStr string
if a := listVal.Filter("version"); len(a.Items) > 0 {
err := hcl1.DecodeObject(&versionStr, a.Items[0].Val)
if err != nil {
return nil, fmt.Errorf("Error reading version for provider %q: %s", name, err)
}
}
var constraints discovery.Constraints
if versionStr != "" {
constraints, err = discovery.ConstraintStr(versionStr).Parse()
if err != nil {
return nil, fmt.Errorf("Error parsing version for provider %q: %s", name, err)
}
}
var alias string
if a := listVal.Filter("alias"); len(a.Items) > 0 {
err := hcl1.DecodeObject(&alias, a.Items[0].Val)
if err != nil {
return nil, fmt.Errorf("Error reading alias for provider %q: %s", name, err)
}
}
inst := moduledeps.ProviderInstance(name)
if alias != "" {
inst = moduledeps.ProviderInstance(name + "." + alias)
}
m.Providers[inst] = moduledeps.ProviderDependency{
Constraints: constraints,
Reason: moduledeps.ProviderDependencyExplicit,
}
}
}
{
// For our purposes here we don't need to distinguish "resource"
// and "data" blocks -- provider references are the same for
// both of them -- so we'll just merge them together into a
// single list and iterate it.
resourceConfigsList := list.Filter("resource")
dataResourceConfigsList := list.Filter("data")
resourceConfigsList.Items = append(resourceConfigsList.Items, dataResourceConfigsList.Items...)
resourceObjs := resourceConfigsList.Children()
for _, resourceObj := range resourceObjs.Items {
if len(resourceObj.Keys) != 2 {
return nil, fmt.Errorf("resource or data block has wrong number of labels")
}
typeName := resourceObj.Keys[0].Token.Value().(string)
name := resourceObj.Keys[1].Token.Value().(string)
rAddr := addrs.Resource{
Mode: addrs.ManagedResourceMode, // not necessarily true, but good enough for our purposes here
Type: typeName,
Name: name,
}
var listVal *hcl1ast.ObjectList
if ot, ok := resourceObj.Val.(*hcl1ast.ObjectType); ok {
listVal = ot.List
} else {
return nil, fmt.Errorf("resource %q %q must be a block", typeName, name)
}
if o := listVal.Filter("count"); len(o.Items) > 0 {
ret.ResourceHasCount[rAddr] = true
}
var providerKey string
if o := listVal.Filter("provider"); len(o.Items) > 0 {
err := hcl1.DecodeObject(&providerKey, o.Items[0].Val)
if err != nil {
return nil, fmt.Errorf("Error reading provider for resource %q %q: %s", typeName, name, err)
}
}
if providerKey == "" {
providerKey = rAddr.DefaultProviderConfig().StringCompact()
}
inst := moduledeps.ProviderInstance(providerKey)
if _, exists := m.Providers[inst]; !exists {
m.Providers[inst] = moduledeps.ProviderDependency{
Reason: moduledeps.ProviderDependencyImplicit,
}
}
ret.ResourceProviderType[rAddr] = inst.Type()
}
}
}
providerFactories, err := u.Providers.ResolveProviders(m.PluginRequirements())
if err != nil {
return nil, fmt.Errorf("error resolving providers: %s", err)
}
for name, fn := range providerFactories {
provider, err := fn()
if err != nil {
return nil, fmt.Errorf("failed to load provider %q: %s", name, err)
}
// The current GetSchema interface is non-ideal. We're going to make
// this much simpler in the new interface, but we'll need to shim
// it for now.
resourceTypes := provider.Resources()
dataSources := provider.DataSources()
var resourceTypeNames, dataSourceNames []string
for _, t := range resourceTypes {
resourceTypeNames = append(resourceTypeNames, t.Name)
}
for _, t := range dataSources {
dataSourceNames = append(dataSourceNames, t.Name)
}
schema, err := provider.GetSchema(&terraform.ProviderSchemaRequest{
DataSources: dataSourceNames,
ResourceTypes: resourceTypeNames,
})
if err != nil {
return nil, fmt.Errorf("failed to get schema from provider %q: %s", name, err)
}
ret.ProviderSchemas[name] = schema
}
// TODO: Also ProvisionerSchemas
return ret, nil
}
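
To connect the two halves of this commit, the following in-package sketch
shows how the maps populated by analyze are consumed when upgrading a single
resource block. The helper name and the error returns are illustrative; the
corresponding code in upgrade_native.go below panics on the first two lookups
instead, because a miss there indicates a bug in the analyzer.

package configupgrade

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/config/configschema"
)

// schemaForResource is a hypothetical helper: given the analysis result and
// a resource block's two labels, it returns the schema used to decide how
// each item inside the block should be interpreted.
func schemaForResource(an *analysis, typeName, name string) (*configschema.Block, error) {
	addr := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: typeName,
		Name: name,
	}

	// analyze recorded which provider each resource belongs to...
	providerType, ok := an.ResourceProviderType[addr]
	if !ok {
		return nil, fmt.Errorf("no provider type recorded for %s", addr.String())
	}

	// ...and fetched a full schema for every provider it resolved.
	providerSchema, ok := an.ProviderSchemas[providerType]
	if !ok {
		return nil, fmt.Errorf("no schema loaded for provider %q", providerType)
	}

	schema, ok := providerSchema.ResourceTypes[addr.Type]
	if !ok {
		return nil, fmt.Errorf("provider %q does not support resource type %q", providerType, addr.Type)
	}
	return schema, nil
}
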


@ -30,10 +30,16 @@ import (
// warnings are also represented as "TF-UPGRADE-TODO:" comments in the
// generated source files so that users can visit them all and decide what to
// do with them.
func Upgrade(input ModuleSources) (ModuleSources, tfdiags.Diagnostics) {
func (u *Upgrader) Upgrade(input ModuleSources) (ModuleSources, tfdiags.Diagnostics) {
ret := make(ModuleSources)
var diags tfdiags.Diagnostics
an, err := u.analyze(input)
if err != nil {
diags = diags.Append(err)
return ret, diags
}
for name, src := range input {
ext := fileExt(name)
if ext == "" {
@ -91,7 +97,7 @@ func Upgrade(input ModuleSources) (ModuleSources, tfdiags.Diagnostics) {
}
// TODO: Actually rewrite this .tf file.
result, fileDiags := upgradeNativeSyntaxFile(name, src)
result, fileDiags := u.upgradeNativeSyntaxFile(name, src, an)
diags = diags.Append(fileDiags)
if fileDiags.HasErrors() {
// Leave unchanged, then.


@ -17,7 +17,7 @@ import (
"github.com/hashicorp/terraform/tfdiags"
)
func upgradeExpr(val interface{}, filename string, interp bool) ([]byte, tfdiags.Diagnostics) {
func upgradeExpr(val interface{}, filename string, interp bool, an *analysis) ([]byte, tfdiags.Diagnostics) {
var buf bytes.Buffer
var diags tfdiags.Diagnostics
@ -47,7 +47,7 @@ func upgradeExpr(val interface{}, filename string, interp bool) ([]byte, tfdiags
})
}
interpSrc, interpDiags := upgradeExpr(hilNode, filename, interp)
interpSrc, interpDiags := upgradeExpr(hilNode, filename, interp, an)
buf.Write(interpSrc)
diags = diags.Append(interpDiags)
@ -105,9 +105,9 @@ func upgradeExpr(val interface{}, filename string, interp bool) ([]byte, tfdiags
lhsExpr := tv.Exprs[0]
rhsExpr := tv.Exprs[1]
lhsSrc, exprDiags := upgradeExpr(lhsExpr, filename, true)
lhsSrc, exprDiags := upgradeExpr(lhsExpr, filename, true, an)
diags = diags.Append(exprDiags)
rhsSrc, exprDiags := upgradeExpr(rhsExpr, filename, true)
rhsSrc, exprDiags := upgradeExpr(rhsExpr, filename, true, an)
diags = diags.Append(exprDiags)
// HIL's AST represents -foo as (0 - foo), so we'll recognize
@ -133,18 +133,18 @@ func upgradeExpr(val interface{}, filename string, interp bool) ([]byte, tfdiags
buf.WriteString(", ")
}
exprSrc, exprDiags := upgradeExpr(arg, filename, true)
exprSrc, exprDiags := upgradeExpr(arg, filename, true, an)
diags = diags.Append(exprDiags)
buf.Write(exprSrc)
}
buf.WriteByte(')')
case *hilast.Conditional:
condSrc, exprDiags := upgradeExpr(tv.CondExpr, filename, true)
condSrc, exprDiags := upgradeExpr(tv.CondExpr, filename, true, an)
diags = diags.Append(exprDiags)
trueSrc, exprDiags := upgradeExpr(tv.TrueExpr, filename, true)
trueSrc, exprDiags := upgradeExpr(tv.TrueExpr, filename, true, an)
diags = diags.Append(exprDiags)
falseSrc, exprDiags := upgradeExpr(tv.FalseExpr, filename, true)
falseSrc, exprDiags := upgradeExpr(tv.FalseExpr, filename, true, an)
diags = diags.Append(exprDiags)
buf.Write(condSrc)
@ -154,9 +154,9 @@ func upgradeExpr(val interface{}, filename string, interp bool) ([]byte, tfdiags
buf.Write(falseSrc)
case *hilast.Index:
targetSrc, exprDiags := upgradeExpr(tv.Target, filename, true)
targetSrc, exprDiags := upgradeExpr(tv.Target, filename, true, an)
diags = diags.Append(exprDiags)
keySrc, exprDiags := upgradeExpr(tv.Key, filename, true)
keySrc, exprDiags := upgradeExpr(tv.Key, filename, true, an)
diags = diags.Append(exprDiags)
buf.Write(targetSrc)
buf.WriteString("[")
@ -176,7 +176,7 @@ func upgradeExpr(val interface{}, filename string, interp bool) ([]byte, tfdiags
// If there's only one expression and it isn't a literal string
// then we'll just output it naked, since wrapping a single
// expression in interpolation is no longer idiomatic.
interped, interpDiags := upgradeExpr(item, filename, true)
interped, interpDiags := upgradeExpr(item, filename, true, an)
diags = diags.Append(interpDiags)
buf.Write(interped)
break
@ -192,7 +192,7 @@ func upgradeExpr(val interface{}, filename string, interp bool) ([]byte, tfdiags
}
}
interped, interpDiags := upgradeExpr(item, filename, true)
interped, interpDiags := upgradeExpr(item, filename, true, an)
diags = diags.Append(interpDiags)
buf.WriteString("${")
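
The hunks above only thread the new *analysis argument through upgradeExpr's
recursive calls; the analysis is not consulted yet. Below is a small
in-package sketch of how the entry point is exercised: the expression string
is made up, and hil.Parse is assumed to be how callers obtain HIL AST nodes
of the kind upgradeExpr already handles.

package configupgrade

import (
	"fmt"

	"github.com/hashicorp/hil"
)

// exprUpgradeExample is a hypothetical helper showing a single call into
// upgradeExpr with the analysis threaded through.
func exprUpgradeExample(an *analysis) error {
	// Parse a legacy interpolation string into a HIL AST node, the same
	// kind of value upgradeExpr receives for interpolated attribute values.
	node, err := hil.Parse(`${element(var.subnets, count.index)}`)
	if err != nil {
		return err
	}

	// interp=true matches how the resource-attribute callers invoke it.
	src, diags := upgradeExpr(node, "example.tf", true, an)
	if diags.HasErrors() {
		return diags.Err()
	}

	fmt.Printf("%s\n", src) // expected to print something like: element(var.subnets, count.index)
	return nil
}
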


@ -3,9 +3,12 @@ package configupgrade
import (
"bytes"
"fmt"
"io"
"sort"
"strings"
"github.com/hashicorp/terraform/addrs"
version "github.com/hashicorp/go-version"
hcl1ast "github.com/hashicorp/hcl/hcl/ast"
@ -24,7 +27,7 @@ type upgradeFileResult struct {
ProviderRequirements map[string]version.Constraints
}
func upgradeNativeSyntaxFile(filename string, src []byte) (upgradeFileResult, tfdiags.Diagnostics) {
func (u *Upgrader) upgradeNativeSyntaxFile(filename string, src []byte, an *analysis) (upgradeFileResult, tfdiags.Diagnostics) {
var result upgradeFileResult
var diags tfdiags.Diagnostics
@ -63,18 +66,142 @@ func upgradeNativeSyntaxFile(filename string, src []byte) (upgradeFileResult, tf
diags = diags.Append(&hcl2.Diagnostic{
Severity: hcl2.DiagWarning,
Summary: "Unsupported top-level attribute",
Detail: fmt.Sprintf("Attribute %q is not expected here, so its expression was not migrated.", blockType),
Detail: fmt.Sprintf("Attribute %q is not expected here, so its expression was not upgraded.", blockType),
Subject: hcl1PosRange(filename, item.Keys[0].Pos()).Ptr(),
})
// Preserve the item as-is, using the hcl1printer package.
buf.WriteString("# TF-UPGRADE-TODO: Top-level attributes are not valid, so this was not automatically migrated.\n")
buf.WriteString("# TF-UPGRADE-TODO: Top-level attributes are not valid, so this was not automatically upgraded.\n")
hcl1printer.Fprint(&buf, item)
buf.WriteString("\n\n")
continue
}
declRange := hcl1PosRange(filename, item.Keys[0].Pos())
switch blockType {
case "resource":
if len(labels) != 2 {
// Should never happen for valid input.
diags = diags.Append(&hcl2.Diagnostic{
Severity: hcl2.DiagError,
Summary: "Invalid resource block",
Detail: "A resource block must have two labels: the resource type and name.",
Subject: &declRange,
})
continue
}
rAddr := addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: labels[0],
Name: labels[1],
}
// We should always have a schema for each provider in our analysis
// object. If not, it's a bug in the analyzer.
providerType, ok := an.ResourceProviderType[rAddr]
if !ok {
panic(fmt.Sprintf("unknown provider type for %s", rAddr.String()))
}
providerSchema, ok := an.ProviderSchemas[providerType]
if !ok {
panic(fmt.Sprintf("missing schema for provider type %q", providerType))
}
schema, ok := providerSchema.ResourceTypes[rAddr.Type]
if !ok {
diags = diags.Append(&hcl2.Diagnostic{
Severity: hcl2.DiagError,
Summary: "Unknown resource type",
Detail: fmt.Sprintf("The resource type %q is not known to the currently-selected version of provider %q.", rAddr.Type, providerType),
Subject: &declRange,
})
continue
}
printComments(&buf, item.LeadComment)
printBlockOpen(&buf, blockType, labels, item.LineComment)
args := body.List.Items
for i, arg := range args {
comments := adhocComments.TakeBefore(arg)
for _, group := range comments {
printComments(&buf, group)
buf.WriteByte('\n') // Extra separator after each group
}
printComments(&buf, arg.LeadComment)
name := arg.Keys[0].Token.Value().(string)
//labelKeys := arg.Keys[1:]
switch name {
// TODO: Special case for all of the "meta-arguments" allowed
// in a resource block, such as "count", "lifecycle",
// "provisioner", etc.
default:
// We'll consult the schema to see how we ought to interpret
// this item.
if _, isAttr := schema.Attributes[name]; isAttr {
// We'll tolerate a block with no labels here as a degenerate
// way to assign a map, but we can't migrate a block that has
// labels. In practice this should never happen because
// nested blocks in resource blocks did not accept labels
// prior to v0.12.
if len(arg.Keys) != 1 {
diags = diags.Append(&hcl2.Diagnostic{
Severity: hcl2.DiagError,
Summary: "Block where attribute was expected",
Detail: fmt.Sprintf("Within %s the name %q is an attribute name, not a block type.", rAddr.Type, name),
Subject: hcl1PosRange(filename, arg.Keys[0].Pos()).Ptr(),
})
continue
}
valSrc, valDiags := upgradeExpr(arg.Val, filename, true, an)
diags = diags.Append(valDiags)
printAttribute(&buf, arg.Keys[0].Token.Value().(string), valSrc, arg.LineComment)
} else if _, isBlock := schema.BlockTypes[name]; isBlock {
// TODO: Also upgrade blocks.
// In particular we need to handle the tricky case where
// a user attempts to treat a block type name like it's
// an attribute, by producing a "dynamic" block.
hcl1printer.Fprint(&buf, arg)
buf.WriteByte('\n')
} else {
if arg.Assign.IsValid() {
diags = diags.Append(&hcl2.Diagnostic{
Severity: hcl2.DiagError,
Summary: "Unrecognized attribute name",
Detail: fmt.Sprintf("Resource type %s does not expect an attribute named %q.", rAddr.Type, name),
Subject: hcl1PosRange(filename, arg.Keys[0].Pos()).Ptr(),
})
} else {
diags = diags.Append(&hcl2.Diagnostic{
Severity: hcl2.DiagError,
Summary: "Unrecognized block type",
Detail: fmt.Sprintf("Resource type %s does not expect blocks of type %q.", rAddr.Type, name),
Subject: hcl1PosRange(filename, arg.Keys[0].Pos()).Ptr(),
})
}
continue
}
}
// If we have another item and it's more than one line away
// from the current one then we'll print an extra blank line
// to retain that separation.
if (i + 1) < len(args) {
next := args[i+1]
thisPos := arg.Pos()
nextPos := next.Pos()
if nextPos.Line-thisPos.Line > 1 {
buf.WriteByte('\n')
}
}
}
buf.WriteString("}\n\n")
case "variable":
printComments(&buf, item.LeadComment)
printBlockOpen(&buf, blockType, labels, item.LineComment)
@ -85,11 +212,11 @@ func upgradeNativeSyntaxFile(filename string, src []byte) (upgradeFileResult, tf
diags = diags.Append(&hcl2.Diagnostic{
Severity: hcl2.DiagWarning,
Summary: "Invalid nested block",
Detail: fmt.Sprintf("Blocks of type %q are not expected here, so this was not automatically migrated.", arg.Keys[0].Token.Value().(string)),
Detail: fmt.Sprintf("Blocks of type %q are not expected here, so this was not automatically upgraded.", arg.Keys[0].Token.Value().(string)),
Subject: hcl1PosRange(filename, arg.Keys[0].Pos()).Ptr(),
})
// Preserve the item as-is, using the hcl1printer package.
buf.WriteString("\n# TF-UPGRADE-TODO: Blocks are not expected here, so this was not automatically migrated.\n")
buf.WriteString("\n# TF-UPGRADE-TODO: Blocks are not expected here, so this was not automatically upgraded.\n")
hcl1printer.Fprint(&buf, arg)
buf.WriteString("\n\n")
continue
@ -133,7 +260,7 @@ func upgradeNativeSyntaxFile(filename string, src []byte) (upgradeFileResult, tf
// into the default case and migrate it as a normal expression.
fallthrough
default:
valSrc, valDiags := upgradeExpr(arg.Val, filename, false)
valSrc, valDiags := upgradeExpr(arg.Val, filename, false, an)
diags = diags.Append(valDiags)
printAttribute(&buf, arg.Keys[0].Token.Value().(string), valSrc, arg.LineComment)
}
@ -162,11 +289,11 @@ func upgradeNativeSyntaxFile(filename string, src []byte) (upgradeFileResult, tf
diags = diags.Append(&hcl2.Diagnostic{
Severity: hcl2.DiagWarning,
Summary: "Invalid nested block",
Detail: fmt.Sprintf("Blocks of type %q are not expected here, so this was not automatically migrated.", arg.Keys[0].Token.Value().(string)),
Detail: fmt.Sprintf("Blocks of type %q are not expected here, so this was not automatically upgraded.", arg.Keys[0].Token.Value().(string)),
Subject: hcl1PosRange(filename, arg.Keys[0].Pos()).Ptr(),
})
// Preserve the item as-is, using the hcl1printer package.
buf.WriteString("\n# TF-UPGRADE-TODO: Blocks are not expected here, so this was not automatically migrated.\n")
buf.WriteString("\n# TF-UPGRADE-TODO: Blocks are not expected here, so this was not automatically upgraded.\n")
hcl1printer.Fprint(&buf, arg)
buf.WriteString("\n\n")
continue
@ -186,7 +313,7 @@ func upgradeNativeSyntaxFile(filename string, src []byte) (upgradeFileResult, tf
interp = true
}
valSrc, valDiags := upgradeExpr(arg.Val, filename, interp)
valSrc, valDiags := upgradeExpr(arg.Val, filename, interp, an)
diags = diags.Append(valDiags)
printAttribute(&buf, arg.Keys[0].Token.Value().(string), valSrc, arg.LineComment)
@ -215,11 +342,11 @@ func upgradeNativeSyntaxFile(filename string, src []byte) (upgradeFileResult, tf
diags = diags.Append(&hcl2.Diagnostic{
Severity: hcl2.DiagWarning,
Summary: "Invalid nested block",
Detail: fmt.Sprintf("Blocks of type %q are not expected here, so this was not automatically migrated.", arg.Keys[0].Token.Value().(string)),
Detail: fmt.Sprintf("Blocks of type %q are not expected here, so this was not automatically upgraded.", arg.Keys[0].Token.Value().(string)),
Subject: hcl1PosRange(filename, arg.Keys[0].Pos()).Ptr(),
})
// Preserve the item as-is, using the hcl1printer package.
buf.WriteString("\n# TF-UPGRADE-TODO: Blocks are not expected here, so this was not automatically migrated.\n")
buf.WriteString("\n# TF-UPGRADE-TODO: Blocks are not expected here, so this was not automatically upgraded.\n")
hcl1printer.Fprint(&buf, arg)
buf.WriteString("\n\n")
continue
@ -235,7 +362,7 @@ func upgradeNativeSyntaxFile(filename string, src []byte) (upgradeFileResult, tf
name := arg.Keys[0].Token.Value().(string)
expr := arg.Val
exprSrc, exprDiags := upgradeExpr(expr, filename, true)
exprSrc, exprDiags := upgradeExpr(expr, filename, true, an)
diags = diags.Append(exprDiags)
printAttribute(&buf, name, exprSrc, arg.LineComment)
@ -259,12 +386,12 @@ func upgradeNativeSyntaxFile(filename string, src []byte) (upgradeFileResult, tf
diags = diags.Append(&hcl2.Diagnostic{
Severity: hcl2.DiagWarning,
Summary: "Unsupported root block type",
Detail: fmt.Sprintf("The block type %q is not expected here, so its content was not migrated.", blockType),
Detail: fmt.Sprintf("The block type %q is not expected here, so its content was not upgraded.", blockType),
Subject: hcl1PosRange(filename, item.Keys[0].Pos()).Ptr(),
})
// Preserve the block as-is, using the hcl1printer package.
buf.WriteString("# TF-UPGRADE-TODO: Block type was not recognized, so this block and its contents were not automatically migrated.\n")
buf.WriteString("# TF-UPGRADE-TODO: Block type was not recognized, so this block and its contents were not automatically upgraded.\n")
hcl1printer.Fprint(&buf, item)
buf.WriteString("\n\n")
continue
@ -414,13 +541,13 @@ func (q *commentQueue) TakeBefore(node hcl1ast.Node) []*hcl1ast.CommentGroup {
func hcl1ErrSubjectRange(filename string, err error) *hcl2.Range {
if pe, isPos := err.(*hcl1parser.PosError); isPos {
return hcl1PosRange(filename, pe.Pos)
return hcl1PosRange(filename, pe.Pos).Ptr()
}
return nil
}
func hcl1PosRange(filename string, pos hcl1token.Pos) *hcl2.Range {
return &hcl2.Range{
func hcl1PosRange(filename string, pos hcl1token.Pos) hcl2.Range {
return hcl2.Range{
Filename: filename,
Start: hcl2.Pos{
Line: pos.Line,
@ -434,3 +561,9 @@ func hcl1PosRange(filename string, pos hcl1token.Pos) *hcl2.Range {
},
}
}
func passthruBlockTodo(w io.Writer, node hcl1ast.Node, msg string) {
fmt.Fprintf(w, "\n# TF-UPGRADE-TODO: %s\n", msg)
hcl1printer.Fprint(w, node)
w.Write([]byte{'\n', '\n'})
}
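
Putting the resource handling above together, an illustrative before/after
pair (expressed as Go string constants, as a test fixture might be) gives a
sense of the intended result. The "test_instance" type, its "image"
attribute, and the exact output whitespace are assumptions; nested blocks
such as "tags" are currently passed through verbatim pending the TODO above
about upgrading blocks.

package configupgrade

// Before: a v0.11-style resource block using interpolation syntax.
const exampleResourceInput = `
resource "test_instance" "example" {
  image = "${var.image_id}"

  tags {
    name = "example"
  }
}
`

// After: interpolation-only strings become bare expressions, blank lines
// between arguments are preserved, and the nested "tags" block is printed
// as-is because block upgrading is still a TODO in this commit.
const exampleResourceWant = `
resource "test_instance" "example" {
  image = var.image_id

  tags {
    name = "example"
  }
}
`
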


@ -8,6 +8,9 @@ import (
"os/exec"
"path/filepath"
"testing"
testprovider "github.com/hashicorp/terraform/builtin/providers/test"
"github.com/hashicorp/terraform/terraform"
)
func TestUpgradeValid(t *testing.T) {
@ -28,6 +31,9 @@ func TestUpgradeValid(t *testing.T) {
t.Run(entry.Name(), func(t *testing.T) {
inputDir := filepath.Join(fixtureDir, entry.Name(), "input")
wantDir := filepath.Join(fixtureDir, entry.Name(), "want")
u := &Upgrader{
Providers: terraform.ResourceProviderResolverFixed(testProviders),
}
inputSrc, err := LoadModule(inputDir)
if err != nil {
@ -38,7 +44,7 @@ func TestUpgradeValid(t *testing.T) {
t.Fatal(err)
}
gotSrc, diags := Upgrade(inputSrc)
gotSrc, diags := u.Upgrade(inputSrc)
if diags.HasErrors() {
t.Error(diags.Err())
}
@ -83,7 +89,10 @@ func TestUpgradeRenameJSON(t *testing.T) {
t.Fatal(err)
}
gotSrc, diags := Upgrade(inputSrc)
u := &Upgrader{
Providers: terraform.ResourceProviderResolverFixed(testProviders),
}
gotSrc, diags := u.Upgrade(inputSrc)
if diags.HasErrors() {
t.Error(diags.Err())
}
@ -166,3 +175,9 @@ func diffSourceFilesFallback(got, want []byte) []byte {
buf.WriteString("\n")
return buf.Bytes()
}
var testProviders = map[string]terraform.ResourceProviderFactory{
"test": terraform.ResourceProviderFactory(func() (terraform.ResourceProvider, error) {
return testprovider.Provider(), nil
}),
}


@ -0,0 +1,12 @@
package configupgrade
import (
"github.com/hashicorp/terraform/terraform"
)
// Upgrader is the main type in this package, containing all of the
// dependencies that are needed to perform upgrades.
type Upgrader struct {
Providers terraform.ResourceProviderResolver
Provisioners map[string]terraform.ResourceProvisionerFactory
}