Merge #17358: HCL2-powered configuration loader (not yet used)

Martin Atkins 2018-02-16 11:00:20 -08:00 committed by GitHub
commit 4f833d5682
173 changed files with 14151 additions and 680 deletions

configs/backend.go (new file, +24)

@ -0,0 +1,24 @@
package configs
import (
"github.com/hashicorp/hcl2/hcl"
)
// Backend represents a "backend" block inside a "terraform" block in a module
// or file.
type Backend struct {
Type string
Config hcl.Body
TypeRange hcl.Range
DeclRange hcl.Range
}
func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) {
return &Backend{
Type: block.Labels[0],
TypeRange: block.LabelRanges[0],
Config: block.Body,
DeclRange: block.DefRange,
}, nil
}
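
As a rough illustration of how this decoder is meant to be driven, here is a hedged sketch (not part of this commit; the helper name and schema are hypothetical) that parses a source file with hclparse and hands any "backend" block it finds to decodeBackendBlock:

package configs

import (
	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclparse"
)

// exampleDecodeBackend is a hypothetical helper showing how a parsed
// "backend" block could be fed to decodeBackendBlock.
func exampleDecodeBackend(src []byte) (*Backend, hcl.Diagnostics) {
	parser := hclparse.NewParser()
	f, diags := parser.ParseHCL(src, "example.tf")
	if diags.HasErrors() {
		return nil, diags
	}
	content, moreDiags := f.Body.Content(&hcl.BodySchema{
		Blocks: []hcl.BlockHeaderSchema{
			{Type: "backend", LabelNames: []string{"type"}},
		},
	})
	diags = append(diags, moreDiags...)
	for _, block := range content.Blocks {
		backend, beDiags := decodeBackendBlock(block)
		diags = append(diags, beDiags...)
		return backend, diags
	}
	return nil, diags
}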

configs/compat_shim.go (new file, +99)

@ -0,0 +1,99 @@
package configs
import (
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/zclconf/go-cty/cty"
)
// -------------------------------------------------------------------------
// Functions in this file are compatibility shims intended to ease conversion
// from the old configuration loader. Any use of these functions that makes
// a change should generate a deprecation warning explaining to the user how
// to update their code for new patterns.
//
// Shims are particularly important for any patterns that have been widely
// documented in books, tutorials, etc. Users will still be starting from
// these examples and we want to help them adopt the latest patterns rather
// than leave them stranded.
// -------------------------------------------------------------------------
// shimTraversalInString takes any arbitrary expression and checks if it is
// a quoted string in the native syntax. If it _is_, then it is parsed as a
// traversal and re-wrapped into a synthetic traversal expression and a
// warning is generated. Otherwise, the given expression is just returned
// verbatim.
//
// This function has no effect on expressions from the JSON syntax, since
// traversals in strings are the required pattern in that syntax.
//
// If wantKeyword is set, the generated warning diagnostic will talk about
// keywords rather than references. The behavior is otherwise unchanged, and
// the caller remains responsible for checking that the result is indeed
// a keyword, e.g. using hcl.ExprAsKeyword.
func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) {
if !exprIsNativeQuotedString(expr) {
return expr, nil
}
strVal, diags := expr.Value(nil)
if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() {
// Since we're not even able to attempt a shim here, we'll discard
// the diagnostics we saw so far and let the caller's own error
// handling take care of reporting the invalid expression.
return expr, nil
}
// The position handling here isn't _quite_ right because it won't
// take into account any escape sequences in the literal string, but
// it should be close enough for any error reporting to make sense.
srcRange := expr.Range()
startPos := srcRange.Start // copy
startPos.Column++ // skip initial quote
startPos.Byte++ // skip initial quote
traversal, tDiags := hclsyntax.ParseTraversalAbs(
[]byte(strVal.AsString()),
srcRange.Filename,
startPos,
)
diags = append(diags, tDiags...)
if wantKeyword {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagWarning,
Summary: "Quoted keywords are deprecated",
Detail: "In this context, keywords are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this keyword to silence this warning.",
Subject: &srcRange,
})
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagWarning,
Summary: "Quoted references are deprecated",
Detail: "In this context, references are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this reference to silence this warning.",
Subject: &srcRange,
})
}
return &hclsyntax.ScopeTraversalExpr{
Traversal: traversal,
SrcRange: srcRange,
}, diags
}
// shimIsIgnoreChangesStar returns true if the given expression seems to be
// a string literal whose value is "*". This is used to support a legacy
// form of ignore_changes = all .
//
// This function does not itself emit any diagnostics, so it's the caller's
// responsibility to emit a warning diagnostic when this function returns true.
func shimIsIgnoreChangesStar(expr hcl.Expression) bool {
val, valDiags := expr.Value(nil)
if valDiags.HasErrors() {
return false
}
if val.Type() != cty.String || val.IsNull() || !val.IsKnown() {
return false
}
return val.AsString() == "*"
}
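
To make the shim's behavior concrete, here is a hedged, test-style sketch (not in this commit; the function name is illustrative) that parses a quoted reference with hclsyntax and runs it through shimTraversalInString, yielding a synthetic traversal expression plus the deprecation warning:

package configs

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

// exampleShimTraversal is a hypothetical demonstration of the shim.
func exampleShimTraversal() {
	expr, _ := hclsyntax.ParseExpression(
		[]byte(`"aws_instance.example"`),
		"example.tf",
		hcl.Pos{Line: 1, Column: 1, Byte: 0},
	)
	shimmed, diags := shimTraversalInString(expr, false)
	// shimmed is now an *hclsyntax.ScopeTraversalExpr and diags carries the
	// "Quoted references are deprecated" warning produced above.
	for _, diag := range diags {
		fmt.Println(diag.Summary)
	}
	_ = shimmed
}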

configs/config.go (new file, +115)

@ -0,0 +1,115 @@
package configs
import (
"sort"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/hcl2/hcl"
)
// A Config is a node in the tree of modules within a configuration.
//
// The module tree is constructed by following ModuleCall instances recursively
// through the root module transitively into descendent modules.
//
// A module tree described in *this* package represents the static tree
// represented by configuration. During evaluation a static module node may
// expand into zero or more module instances depending on the use of count and
// for_each configuration attributes within each call.
type Config struct {
// Root points to the Config for the root module within the same
// module tree as this module. If this module _is_ the root module then
// this is self-referential.
Root *Config
// Parent points to the Config for the module that directly calls
// this module. If this is the root module then this field is nil.
Parent *Config
// Path is a sequence of module logical names that traverse from the root
// module to this config. Path is empty for the root module.
//
// This should not be used to display a path to the end-user, since
// our UI conventions call for us to return a module address string in that
// case, and a module address string ought to be built from the dynamic
// module tree (resulting from evaluating "count" and "for_each" arguments
// on our calls to produce potentially multiple child instances per call)
// rather than from our static module tree.
Path []string
// ChildModules points to the Config for each of the direct child modules
// called from this module. The keys in this map match the keys in
// Module.ModuleCalls.
Children map[string]*Config
// Module points to the object describing the configuration for the
// various elements (variables, resources, etc) defined by this module.
Module *Module
// CallRange is the source range for the header of the module block that
// requested this module.
//
// This field is meaningless for the root module, where its contents are undefined.
CallRange hcl.Range
// SourceAddr is the source address that the referenced module was requested
// from, as specified in configuration.
//
// This field is meaningless for the root module, where its contents are undefined.
SourceAddr string
// SourceAddrRange is the location in the configuration source where the
// SourceAddr value was set, for use in diagnostic messages.
//
// This field is meaningless for the root module, where its contents are undefined.
SourceAddrRange hcl.Range
// Version is the specific version that was selected for this module,
// based on version constraints given in configuration.
//
// This field is nil if the module was loaded from a non-registry source,
// since versions are not supported for other sources.
//
// This field is meaningless for the root module, where it will always
// be nil.
Version *version.Version
}
// Depth returns the number of "hops" the receiver is from the root of its
// module tree, with the root module having a depth of zero.
func (c *Config) Depth() int {
ret := 0
this := c
for this.Parent != nil {
ret++
this = this.Parent
}
return ret
}
// DeepEach calls the given function once for each module in the tree, starting
// with the receiver.
//
// A parent is always called before its children and children of a particular
// node are visited in lexicographic order by their names.
func (c *Config) DeepEach(cb func(c *Config)) {
cb(c)
names := make([]string, 0, len(c.Children))
for name := range c.Children {
names = append(names, name)
}
sort.Strings(names) // visit children in lexicographic order, as documented above
for _, name := range names {
c.Children[name].DeepEach(cb)
}
}
// AllModules returns a slice containing the receiver and all of its descendent
// nodes in the module tree, in the same order they would be visited by
// DeepEach.
func (c *Config) AllModules() []*Config {
var ret []*Config
c.DeepEach(func(c *Config) {
ret = append(ret, c)
})
return ret
}
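
A small hedged sketch of how these helpers compose (the function name is illustrative): walking a tree produced by BuildConfig and printing each module's logical path and depth.

package configs

import (
	"fmt"
	"strings"
)

// printModuleTree is a hypothetical helper that reports every module in a
// tree along with its depth, using DeepEach's parent-before-children order.
func printModuleTree(cfg *Config) {
	cfg.DeepEach(func(c *Config) {
		name := strings.Join(c.Path, ".")
		if name == "" {
			name = "<root>"
		}
		fmt.Printf("%s (depth %d)\n", name, c.Depth())
	})
}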

configs/config_build.go (new file, +158)

@ -0,0 +1,158 @@
package configs
import (
"sort"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/hcl2/hcl"
)
// BuildConfig constructs a Config from a root module by loading all of its
// descendent modules via the given ModuleWalker.
//
// The result is a module tree that has so far only had basic module- and
// file-level invariants validated. If the returned diagnostics contains errors,
// the returned module tree may be incomplete but can still be used carefully
// for static analysis.
func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) {
var diags hcl.Diagnostics
cfg := &Config{
Module: root,
}
cfg.Root = cfg // Root module is self-referential.
cfg.Children, diags = buildChildModules(cfg, walker)
return cfg, diags
}
func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) {
var diags hcl.Diagnostics
ret := map[string]*Config{}
calls := parent.Module.ModuleCalls
// We'll sort the calls by their local names so that they'll appear in a
// predictable order in any logging that's produced during the walk.
callNames := make([]string, 0, len(calls))
for k := range calls {
callNames = append(callNames, k)
}
sort.Strings(callNames)
for _, callName := range callNames {
call := calls[callName]
path := make([]string, len(parent.Path)+1)
copy(path, parent.Path)
path[len(path)-1] = call.Name
req := ModuleRequest{
Name: call.Name,
Path: path,
SourceAddr: call.SourceAddr,
SourceAddrRange: call.SourceAddrRange,
VersionConstraint: call.Version,
Parent: parent,
CallRange: call.DeclRange,
}
mod, ver, modDiags := walker.LoadModule(&req)
diags = append(diags, modDiags...)
if mod == nil {
// nil can be returned if the source address was invalid and so
// nothing could be loaded whatsoever. LoadModule should've
// returned at least one error diagnostic in that case.
continue
}
child := &Config{
Parent: parent,
Root: parent.Root,
Path: path,
Module: mod,
CallRange: call.DeclRange,
SourceAddr: call.SourceAddr,
SourceAddrRange: call.SourceAddrRange,
Version: ver,
}
child.Children, modDiags = buildChildModules(child, walker)
diags = append(diags, modDiags...)
ret[call.Name] = child
}
return ret, diags
}
// A ModuleWalker knows how to find and load a child module given details about
// the module to be loaded and a reference to its partially-loaded parent
// Config.
type ModuleWalker interface {
// LoadModule finds and loads a requested child module.
//
// If errors are detected during loading, implementations should return them
// in the diagnostics object. If the diagnostics object contains any errors
// then the caller will tolerate the returned module being nil or incomplete.
// If no errors are returned, it should be non-nil and complete.
//
// Full validation need not have been performed but an implementation should
// ensure that the basic file- and module-validations performed by the
// LoadConfigDir function (valid syntax, no namespace collisions, etc) have
// been performed before returning a module.
LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics)
}
// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps
// a callback function, for more convenient use of that interface.
type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics)
// LoadModule implements ModuleWalker.
func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) {
return f(req)
}
// ModuleRequest is used with the ModuleWalker interface to describe a child
// module that must be loaded.
type ModuleRequest struct {
// Name is the "logical name" of the module call within configuration.
// This is provided in case the name is used as part of a storage key
// for the module, but implementations must otherwise treat it as an
// opaque string. It is guaranteed to have already been validated as an
// HCL identifier and UTF-8 encoded.
Name string
// Path is a list of logical names that traverse from the root module to
// this module. This can be used, for example, to form a lookup key for
// each distinct module call in a configuration, allowing for multiple
// calls with the same name at different points in the tree.
Path []string
// SourceAddr is the source address string provided by the user in
// configuration.
SourceAddr string
// SourceAddrRange is the source range for the SourceAddr value as it
// was provided in configuration. This can and should be used to generate
// diagnostics about the source address having invalid syntax, referring
// to a non-existent object, etc.
SourceAddrRange hcl.Range
// VersionConstraint is the version constraint applied to the module in
// configuration. This data structure includes the source range for
// the constraint, which can and should be used to generate diagnostics
// about constraint-related issues, such as constraints that eliminate all
// available versions of a module whose source is otherwise valid.
VersionConstraint VersionConstraint
// Parent is the partially-constructed module tree node that the loaded
// module will be added to. Callers may refer to any field of this
// structure except Children, which is still under construction when
// ModuleRequest objects are created and thus has undefined content.
// The main reason this is provided is so that full module paths can
// be constructed for uniqueness.
Parent *Config
// CallRange is the source range for the header of the "module" block
// in configuration that prompted this request. This can be used as the
// subject of an error diagnostic that relates to the module call itself,
// rather than to either its source address or its version number.
CallRange hcl.Range
}


@ -0,0 +1,71 @@
package configs
import (
"fmt"
"path/filepath"
"reflect"
"sort"
"strings"
"testing"
"github.com/davecgh/go-spew/spew"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/hcl2/hcl"
)
func TestBuildConfig(t *testing.T) {
parser := NewParser(nil)
mod, diags := parser.LoadConfigDir("test-fixtures/config-build")
assertNoDiagnostics(t, diags)
if mod == nil {
t.Fatal("got nil root module; want non-nil")
}
versionI := 0
cfg, diags := BuildConfig(mod, ModuleWalkerFunc(
func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) {
// For the sake of this test we're going to just treat our
// SourceAddr as a path relative to our fixture directory.
// A "real" implementation of ModuleWalker should accept the
// various different source address syntaxes Terraform supports.
sourcePath := filepath.Join("test-fixtures/config-build", req.SourceAddr)
mod, diags := parser.LoadConfigDir(sourcePath)
version, _ := version.NewVersion(fmt.Sprintf("1.0.%d", versionI))
versionI++
return mod, version, diags
},
))
assertNoDiagnostics(t, diags)
if cfg == nil {
t.Fatal("got nil config; want non-nil")
}
var got []string
cfg.DeepEach(func(c *Config) {
got = append(got, fmt.Sprintf("%s %s", strings.Join(c.Path, "."), c.Version))
})
sort.Strings(got)
want := []string{
" <nil>",
"child_a 1.0.0",
"child_a.child_c 1.0.1",
"child_b 1.0.2",
"child_b.child_c 1.0.3",
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(want))
}
if _, exists := cfg.Children["child_a"].Children["child_c"].Module.Outputs["hello"]; !exists {
t.Fatalf("missing output 'hello' in child_a.child_c")
}
if _, exists := cfg.Children["child_b"].Children["child_c"].Module.Outputs["hello"]; !exists {
t.Fatalf("missing output 'hello' in child_b.child_c")
}
if cfg.Children["child_a"].Children["child_c"].Module == cfg.Children["child_b"].Children["child_c"].Module {
t.Fatalf("child_a.child_c is same object as child_b.child_c; should not be")
}
}


@ -0,0 +1,114 @@
package configload
import (
"io"
"os"
"path/filepath"
"strings"
)
// copyDir copies the src directory contents into dst. Both directories
// should already exist.
func copyDir(dst, src string) error {
src, err := filepath.EvalSymlinks(src)
if err != nil {
return err
}
walkFn := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if path == src {
return nil
}
if strings.HasPrefix(filepath.Base(path), ".") {
// Skip any dot files
if info.IsDir() {
return filepath.SkipDir
} else {
return nil
}
}
// The "path" has the src prefixed to it. We need to join our
// destination with the path without the src on it.
dstPath := filepath.Join(dst, path[len(src):])
// we don't want to try and copy the same file over itself.
if eq, err := sameFile(path, dstPath); eq {
return nil
} else if err != nil {
return err
}
// If we have a directory, make that subdirectory, then continue
// the walk.
if info.IsDir() {
if path == filepath.Join(src, dst) {
// dst is in src; don't walk it.
return nil
}
if err := os.MkdirAll(dstPath, 0755); err != nil {
return err
}
return nil
}
// If we have a file, copy the contents.
srcF, err := os.Open(path)
if err != nil {
return err
}
defer srcF.Close()
dstF, err := os.Create(dstPath)
if err != nil {
return err
}
defer dstF.Close()
if _, err := io.Copy(dstF, srcF); err != nil {
return err
}
// Chmod it
return os.Chmod(dstPath, info.Mode())
}
return filepath.Walk(src, walkFn)
}
// sameFile tries to determine if two paths refer to the same file.
// If the paths don't match, we look up the inode on supported systems.
func sameFile(a, b string) (bool, error) {
if a == b {
return true, nil
}
aIno, err := inode(a)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
bIno, err := inode(b)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
if aIno > 0 && aIno == bIno {
return true, nil
}
return false, nil
}
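
For orientation, a hedged usage sketch (paths and the wrapper name are illustrative): mirroring an installed module directory into a destination that already exists, much as InitDirFromModule does later in this changeset.

package configload

import "os"

// exampleCopyModule is a hypothetical wrapper showing the copyDir contract:
// both the source and destination directories must already exist.
func exampleCopyModule() error {
	if err := os.MkdirAll("workdir", 0755); err != nil {
		return err
	}
	return copyDir("workdir", ".terraform/modules/example")
}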


@ -0,0 +1,4 @@
// Package configload knows how to install modules into the .terraform/modules
// directory and to load modules from those installed locations. It is used
// in conjunction with the LoadConfig function in the parent package.
package configload


@ -0,0 +1,122 @@
package configload
import (
"log"
"path/filepath"
cleanhttp "github.com/hashicorp/go-cleanhttp"
getter "github.com/hashicorp/go-getter"
)
// We configure our own go-getter detector and getter sets here, because
// the set of sources we support is part of Terraform's documentation and
// so we don't want any new sources introduced in go-getter to sneak in here
// and work even though they aren't documented. This also insulates us from
// any meddling that might be done by other go-getter callers linked into our
// executable.
var goGetterDetectors = []getter.Detector{
new(getter.GitHubDetector),
new(getter.BitBucketDetector),
new(getter.S3Detector),
new(getter.FileDetector),
}
var goGetterNoDetectors = []getter.Detector{}
var goGetterDecompressors = map[string]getter.Decompressor{
"bz2": new(getter.Bzip2Decompressor),
"gz": new(getter.GzipDecompressor),
"xz": new(getter.XzDecompressor),
"zip": new(getter.ZipDecompressor),
"tar.bz2": new(getter.TarBzip2Decompressor),
"tar.tbz2": new(getter.TarBzip2Decompressor),
"tar.gz": new(getter.TarGzipDecompressor),
"tgz": new(getter.TarGzipDecompressor),
"tar.xz": new(getter.TarXzDecompressor),
"txz": new(getter.TarXzDecompressor),
}
var goGetterGetters = map[string]getter.Getter{
"file": new(getter.FileGetter),
"git": new(getter.GitGetter),
"hg": new(getter.HgGetter),
"s3": new(getter.S3Getter),
"http": getterHTTPGetter,
"https": getterHTTPGetter,
}
var getterHTTPClient = cleanhttp.DefaultClient()
var getterHTTPGetter = &getter.HttpGetter{
Client: getterHTTPClient,
Netrc: true,
}
// getWithGoGetter retrieves the package referenced in the given address
// into the installation path and then returns the full path to any subdir
// indicated in the address.
//
// The errors returned by this function are those surfaced by the underlying
// go-getter library, which have very inconsistent quality as
// end-user-actionable error messages. At this time we do not have any
// reasonable way to improve these error messages at this layer because
// the underlying errors are not separately recognizable.
func getWithGoGetter(instPath, addr string) (string, error) {
packageAddr, subDir := splitAddrSubdir(addr)
log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath)
realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors)
if err != nil {
return "", err
}
var realSubDir string
realAddr, realSubDir = splitAddrSubdir(realAddr)
if realSubDir != "" {
subDir = filepath.Join(realSubDir, subDir)
}
if realAddr != packageAddr {
log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr)
}
client := getter.Client{
Src: realAddr,
Dst: instPath,
Pwd: instPath,
Mode: getter.ClientModeDir,
Detectors: goGetterNoDetectors, // we already did detection above
Decompressors: goGetterDecompressors,
Getters: goGetterGetters,
}
err = client.Get()
if err != nil {
return "", err
}
// Our subDir string can contain wildcards until this point, so that
// e.g. a subDir of * can expand to one top-level directory in a .tar.gz
// archive. Now that we've expanded the archive successfully we must
// resolve that into a concrete path.
var finalDir string
if subDir != "" {
finalDir, err = getter.SubdirGlob(instPath, subDir)
log.Printf("[TRACE] expanded %q to %q", subDir, finalDir)
if err != nil {
return "", err
}
} else {
finalDir = instPath
}
// If we got this far then we have apparently succeeded in downloading
// the requested object!
return filepath.Clean(finalDir), nil
}
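
A hedged sketch of calling this helper directly (the address, install path, and function name are illustrative only); the returned path already accounts for any //subdir portion of the address:

package configload

import "log"

// exampleGet is a hypothetical demonstration of getWithGoGetter.
func exampleGet() {
	dir, err := getWithGoGetter(
		".terraform/modules/vpc",
		"github.com/hashicorp/example//modules/vpc",
	)
	if err != nil {
		log.Printf("[ERROR] module download failed: %s", err)
		return
	}
	log.Printf("[DEBUG] module package contents are in %s", dir)
}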


@ -0,0 +1,21 @@
// +build linux darwin openbsd netbsd solaris dragonfly

package configload
import (
"fmt"
"os"
"syscall"
)
// inode looks up the inode of a file on POSIX systems.
func inode(path string) (uint64, error) {
stat, err := os.Stat(path)
if err != nil {
return 0, err
}
if st, ok := stat.Sys().(*syscall.Stat_t); ok {
return st.Ino, nil
}
return 0, fmt.Errorf("could not determine file inode")
}


@ -0,0 +1,21 @@
// +build freebsd

package configload
import (
"fmt"
"os"
"syscall"
)
// inode looks up the inode of a file on POSIX systems.
func inode(path string) (uint64, error) {
stat, err := os.Stat(path)
if err != nil {
return 0, err
}
if st, ok := stat.Sys().(*syscall.Stat_t); ok {
return uint64(st.Ino), nil
}
return 0, fmt.Errorf("could not determine file inode")
}


@ -0,0 +1,8 @@
// +build windows

package configload
// no syscall.Stat_t on windows, return 0 for inodes
func inode(path string) (uint64, error) {
return 0, nil
}


@ -0,0 +1,92 @@
package configload
import (
"fmt"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/registry"
"github.com/hashicorp/terraform/svchost/auth"
"github.com/hashicorp/terraform/svchost/disco"
"github.com/spf13/afero"
)
// A Loader instance is the main entry-point for loading configurations via
// this package.
//
// It extends the general config-loading functionality in the parent package
// "configs" to support installation of modules from remote sources and
// loading full configurations using modules that were previously installed.
type Loader struct {
// parser is used to read configuration
parser *configs.Parser
// modules is used to install and locate descendent modules that are
// referenced (directly or indirectly) from the root module.
modules moduleMgr
}
// Config is used with NewLoader to specify configuration arguments for the
// loader.
type Config struct {
// ModulesDir is a path to a directory where descendent modules are
// (or should be) installed. (This is usually the
// .terraform/modules directory, in the common case where this package
// is being loaded from the main Terraform CLI package.)
ModulesDir string
// Services is the service discovery client to use when locating remote
// module registry endpoints. If this is nil then registry sources are
// not supported, which should be true only in specialized circumstances
// such as in tests.
Services *disco.Disco
// Creds is a credentials store for communicating with remote module
// registry endpoints. If this is nil then no credentials will be used.
Creds auth.CredentialsSource
}
// NewLoader creates and returns a loader that reads configuration from the
// real OS filesystem.
//
// The loader has some internal state about the modules that are currently
// installed, which is read from disk as part of this function. If that
// manifest cannot be read then an error will be returned.
func NewLoader(config *Config) (*Loader, error) {
fs := afero.NewOsFs()
parser := configs.NewParser(fs)
reg := registry.NewClient(config.Services, config.Creds, nil)
ret := &Loader{
parser: parser,
modules: moduleMgr{
FS: afero.Afero{Fs: fs},
CanInstall: true,
Dir: config.ModulesDir,
Services: config.Services,
Creds: config.Creds,
Registry: reg,
},
}
err := ret.modules.readModuleManifestSnapshot()
if err != nil {
return nil, fmt.Errorf("failed to read module manifest: %s", err)
}
return ret, nil
}
// Parser returns the underlying parser for this loader.
//
// This is useful for loading other sorts of files than the module directories
// that a loader deals with, since then they will share the source code cache
// for this loader and can thus be shown as snippets in diagnostic messages.
func (l *Loader) Parser() *configs.Parser {
return l.parser
}
// Sources returns the source code cache for the underlying parser of this
// loader. This is a shorthand for l.Parser().Sources().
func (l *Loader) Sources() map[string][]byte {
return l.parser.Sources()
}
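
A minimal hedged sketch of constructing a loader for the conventional working-directory layout (the function name is illustrative); passing nil Services and Creds disables registry sources, as the Config docs above note.

package configload

// exampleNewLoader is a hypothetical constructor call for a CLI-style setup.
func exampleNewLoader() (*Loader, error) {
	return NewLoader(&Config{
		ModulesDir: ".terraform/modules",
		Services:   nil, // no registry access in this sketch
		Creds:      nil, // anonymous access only
	})
}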


@ -0,0 +1,372 @@
package configload
import (
"fmt"
"log"
"os"
"path/filepath"
"sort"
"strings"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/configs"
)
const initFromModuleRootCallName = "root"
const initFromModuleRootKeyPrefix = initFromModuleRootCallName + "."
// InitDirFromModule populates the given directory (which must exist and be
// empty) with the contents of the module at the given source address.
//
// It does this by installing the given module and all of its descendent
// modules in a temporary root directory and then copying the installed
// files into suitable locations. As a consequence, any diagnostics it
// generates will reveal the location of this temporary directory to the
// user.
//
// This rather roundabout installation approach is taken to ensure that
// installation proceeds in a manner identical to normal module installation.
//
// If the given source address specifies a sub-directory of the given
// package then only the sub-directory and its descendents will be copied
// into the given root directory, which will cause any relative module
// references using ../ from that module to be unresolvable. Error diagnostics
// are produced in that case, to prompt the user to rewrite the source strings
// to be absolute references to the original remote module.
//
// This can be used only on a loader that can install modules, and will
// panic otherwise. Use CanInstallModules to determine if this method can be
// used, or refer to the documentation of that method for situations where
// installation ability is guaranteed.
func (l *Loader) InitDirFromModule(rootDir, sourceAddr string, hooks InstallHooks) hcl.Diagnostics {
var diags hcl.Diagnostics
// The way this function works is pretty ugly, but we accept it because
// -from-module is a less important case than normal module installation
// and so it's better to keep this ugly complexity out here rather than
// adding even more complexity to the normal module installer.
// The target directory must exist but be empty.
{
entries, err := l.modules.FS.ReadDir(rootDir)
if err != nil {
if os.IsNotExist(err) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Target directory does not exist",
Detail: fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir),
})
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to read target directory",
Detail: fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err),
})
}
return diags
}
haveEntries := false
for _, entry := range entries {
if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" {
continue
}
haveEntries = true
}
if haveEntries {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Can't populate non-empty directory",
Detail: fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir),
})
return diags
}
}
// We use a hidden sub-loader to manage our inner installation directory,
// but it shares dependencies with the receiver to allow it to access the
// same remote resources and ensure it populates the same source code
// cache.
subLoader := &Loader{
parser: l.parser,
modules: l.modules, // this is a shallow copy, so we can safely mutate below
}
// Our sub-loader will have its own independent manifest and install
// directory, so we can install with it and know we won't interfere
// with the receiver.
subLoader.modules.manifest = make(moduleManifest)
subLoader.modules.Dir = filepath.Join(rootDir, ".terraform/init-from-module")
log.Printf("[DEBUG] using a child module loader in %s to initialize working directory from %q", subLoader.modules.Dir, sourceAddr)
subLoader.modules.FS.RemoveAll(subLoader.modules.Dir) // if this fails then we'll fail on MkdirAll below too
err := subLoader.modules.FS.MkdirAll(subLoader.modules.Dir, os.ModePerm)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to create temporary directory",
Detail: fmt.Sprintf("Failed to create temporary directory %s: %s.", subLoader.modules.Dir, err),
})
return diags
}
fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr)
fakeRange := hcl.Range{
Filename: fakeFilename,
Start: hcl.Pos{
Line: 1,
Column: 1,
Byte: 0,
},
End: hcl.Pos{
Line: 1,
Column: len(fakeFilename) + 1, // not accurate if the address contains unicode, but irrelevant since we have no source cache for this anyway
Byte: len(fakeFilename),
},
}
// Now we need to create an artificial root module that will seed our
// installation process.
fakeRootModule := &configs.Module{
ModuleCalls: map[string]*configs.ModuleCall{
initFromModuleRootCallName: &configs.ModuleCall{
Name: initFromModuleRootCallName,
SourceAddr: sourceAddr,
SourceAddrRange: fakeRange,
SourceSet: true,
DeclRange: fakeRange,
},
},
}
// wrapHooks filters hook notifications to only include Download calls
// and to trim off the initFromModuleRootCallName prefix. We'll produce
// our own Install notifications directly below.
wrapHooks := installHooksInitDir{
Wrapped: hooks,
}
instDiags := subLoader.installDescendentModules(fakeRootModule, rootDir, true, wrapHooks)
diags = append(diags, instDiags...)
if instDiags.HasErrors() {
return diags
}
// If all of that succeeded then we'll now migrate what was installed
// into the final directory structure.
modulesDir := l.modules.Dir
err = subLoader.modules.FS.MkdirAll(modulesDir, os.ModePerm)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to create local modules directory",
Detail: fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err),
})
return diags
}
manifest := subLoader.modules.manifest
recordKeys := make([]string, 0, len(manifest))
for k := range manifest {
recordKeys = append(recordKeys, k)
}
sort.Strings(recordKeys)
for _, recordKey := range recordKeys {
record := manifest[recordKey]
if record.Key == initFromModuleRootCallName {
// We've found the module the user requested, which we must
// now copy into rootDir so it can be used directly.
log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir)
err := copyDir(rootDir, record.Dir)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to copy root module",
Detail: fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err),
})
continue
}
// We'll try to load the newly-copied module here just so we can
// sniff for any module calls that ../ out of the root directory
// and must thus be rewritten to be absolute addresses again.
// For now we can't do this rewriting automatically, but we'll
// generate an error to help the user do it manually.
mod, _ := l.parser.LoadConfigDir(rootDir) // ignore diagnostics since we're just doing value-add here anyway
for _, mc := range mod.ModuleCalls {
if pathTraversesUp(sourceAddr) {
packageAddr, givenSubdir := splitAddrSubdir(sourceAddr)
newSubdir := filepath.Join(givenSubdir, mc.SourceAddr)
if pathTraversesUp(newSubdir) {
// This should never happen in any reasonable
// configuration since this suggests a path that
// traverses up out of the package root. We'll just
// ignore this, since we'll fail soon enough anyway
// trying to resolve this path when this module is
// loaded.
continue
}
var newAddr = packageAddr
if newSubdir != "" {
newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir))
}
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Root module references parent directory",
Detail: fmt.Sprintf("The requested module %q refers to a module via its parent directory. To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr),
Subject: &mc.SourceAddrRange,
})
continue
}
}
l.modules.manifest[""] = moduleRecord{
Key: "",
Dir: rootDir,
}
continue
}
if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) {
// Ignore the *real* root module, whose key is empty, since
// we're only interested in the module named "root" and its
// descendents.
continue
}
newKey := record.Key[len(initFromModuleRootKeyPrefix):]
instPath := filepath.Join(l.modules.Dir, newKey)
tempPath := filepath.Join(subLoader.modules.Dir, record.Key)
// tempPath won't be present for a module that was installed from
// a relative path, so in that case we just record the installation
// directory and assume it was already copied into place as part
// of its parent.
if _, err := os.Stat(tempPath); err != nil {
if !os.IsNotExist(err) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to stat temporary module install directory",
Detail: fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err),
})
continue
}
var parentKey string
if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 {
parentKey = newKey[:lastDot]
} else {
parentKey = "" // parent is the root module
}
parentOld := manifest[initFromModuleRootKeyPrefix+parentKey]
parentNew := l.modules.manifest[parentKey]
// We need to figure out which portion of our directory is the
// parent package path and which portion is the subdirectory
// under that.
baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir)
if err != nil {
// Should never happen, because we constructed both directories
// from the same base and so they must have a common prefix.
panic(err)
}
newDir := filepath.Join(parentNew.Dir, baseDirRel)
log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir)
newRecord := record // shallow copy
newRecord.Dir = newDir
newRecord.Key = newKey
l.modules.manifest[newKey] = newRecord
hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
continue
}
err = subLoader.modules.FS.MkdirAll(instPath, os.ModePerm)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to create module install directory",
Detail: fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err),
})
continue
}
// We copy rather than "rename" here because renaming between directories
// can be tricky in edge-cases like network filesystems, etc.
log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath)
err := copyDir(instPath, tempPath)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to copy descendent module",
Detail: fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, rootDir, err),
})
continue
}
subDir, err := filepath.Rel(tempPath, record.Dir)
if err != nil {
// Should never happen, because we constructed both directories
// from the same base and so they must have a common prefix.
panic(err)
}
newRecord := record // shallow copy
newRecord.Dir = filepath.Join(instPath, subDir)
newRecord.Key = newKey
l.modules.manifest[newKey] = newRecord
hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
}
err = l.modules.writeModuleManifestSnapshot()
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to write module manifest",
Detail: fmt.Sprintf("Error writing module manifest: %s.", err),
})
}
if !diags.HasErrors() {
// Try to clean up our temporary directory, but don't worry if we don't
// succeed since it shouldn't hurt anything.
subLoader.modules.FS.RemoveAll(subLoader.modules.Dir)
}
return diags
}
func pathTraversesUp(path string) bool {
return strings.HasPrefix(filepath.ToSlash(path), "../")
}
// installHooksInitDir is an adapter wrapper for an InstallHooks that
// does some fakery to make downloads look like they are happening in their
// final locations, rather than in the temporary loader we use.
//
// It also suppresses "Install" calls entirely, since InitDirFromModule
// does its own installation steps after the initial installation pass
// has completed.
type installHooksInitDir struct {
Wrapped InstallHooks
InstallHooksImpl
}
func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version) {
if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) {
// We won't announce the root module, since hook implementations
// don't expect to see that and the caller will usually have produced
// its own user-facing notification about what it's doing anyway.
return
}
trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):]
h.Wrapped.Download(trimAddr, packageAddr, version)
}
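
A hedged sketch of driving this entry point (the source address and function name are illustrative); InstallHooksImpl supplies no-op hook methods so the call can run without a UI:

package configload

import "github.com/hashicorp/hcl2/hcl"

// exampleInitFromModule is a hypothetical caller of InitDirFromModule,
// populating the current (empty) directory from a remote module package.
func exampleInitFromModule(l *Loader) hcl.Diagnostics {
	return l.InitDirFromModule(".", "hashicorp/consul/aws//modules/consul-cluster", InstallHooksImpl{})
}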


@ -0,0 +1,92 @@
package configload
import (
"os"
"path/filepath"
"strings"
"testing"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/terraform/configs"
)
func TestLoaderInitDirFromModule_registry(t *testing.T) {
if os.Getenv("TF_ACC") == "" {
t.Skip("this test accesses registry.terraform.io and github.com; set TF_ACC=1 to run it")
}
fixtureDir := filepath.Clean("test-fixtures/empty")
loader, done := tempChdirLoader(t, fixtureDir)
defer done()
hooks := &testInstallHooks{}
diags := loader.InitDirFromModule(".", "hashicorp/module-installer-acctest/aws//examples/main", hooks)
assertNoDiagnostics(t, diags)
v := version.Must(version.NewVersion("0.0.1"))
wantCalls := []testInstallHookCall{
// The module specified to populate the root directory is not mentioned
// here, because the hook mechanism is defined to talk about descendent
// modules only and so a caller to InitDirFromModule is expected to
// produce its own user-facing announcement about the root module being
// installed.
// Note that "root" in the following examples is, confusingly, the
// label on the module block in the example we've installed here:
// module "root" {
{
Name: "Download",
ModuleAddr: "root",
PackageAddr: "hashicorp/module-installer-acctest/aws",
Version: v,
},
{
Name: "Install",
ModuleAddr: "root",
Version: v,
LocalPath: ".terraform/modules/root/hashicorp-terraform-aws-module-installer-acctest-5e87aff",
},
{
Name: "Install",
ModuleAddr: "root.child_a",
LocalPath: ".terraform/modules/root/hashicorp-terraform-aws-module-installer-acctest-5e87aff/modules/child_a",
},
{
Name: "Install",
ModuleAddr: "root.child_a.child_b",
LocalPath: ".terraform/modules/root/hashicorp-terraform-aws-module-installer-acctest-5e87aff/modules/child_b",
},
}
if assertResultDeepEqual(t, hooks.Calls, wantCalls) {
return
}
// Make sure the configuration is loadable now.
// (This ensures that correct information is recorded in the manifest.)
config, loadDiags := loader.LoadConfig(".")
if assertNoDiagnostics(t, loadDiags) {
return
}
wantTraces := map[string]string{
"": "in example",
"root": "in root module",
"root.child_a": "in child_a module",
"root.child_a.child_b": "in child_b module",
}
gotTraces := map[string]string{}
config.DeepEach(func(c *configs.Config) {
path := strings.Join(c.Path, ".")
if c.Module.Variables["v"] == nil {
gotTraces[path] = "<missing>"
return
}
varDesc := c.Module.Variables["v"].Description
gotTraces[path] = varDesc
})
assertResultDeepEqual(t, gotTraces, wantTraces)
}


@ -0,0 +1,522 @@
package configload
import (
"fmt"
"log"
"os"
"path/filepath"
"strings"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/registry"
"github.com/hashicorp/terraform/registry/regsrc"
)
// InstallModules analyses the root module in the given directory and installs
// all of its direct and transitive dependencies into the loader's modules
// directory, which must already exist.
//
// Since InstallModules makes possibly-time-consuming calls to remote services,
// a hook interface is supported to allow the caller to be notified when
// each module is installed and, for remote modules, when downloading begins.
// InstallModules guarantees that two hook calls will not happen concurrently but
// it does not guarantee any particular ordering of hook calls. This mechanism
// is for UI feedback only and does not give the caller any control over the
// process.
//
// If modules are already installed in the target directory, they will be
// skipped unless their source address or version have changed or unless
// the upgrade flag is set.
//
// InstallModules never deletes any directory, except in the case where it
// needs to replace a directory that is already present with a newly-extracted
// package.
//
// If the returned diagnostics contains errors then the module installation
// may have wholly or partially completed. Modules must be loaded in order
// to find their dependencies, so this function does many of the same checks
// as LoadConfig as a side-effect.
//
// This function will panic if called on a loader that cannot install modules.
// Use CanInstallModules to determine if a loader can install modules, or
// refer to the documentation for that method for situations where module
// installation capability is guaranteed.
func (l *Loader) InstallModules(rootDir string, upgrade bool, hooks InstallHooks) hcl.Diagnostics {
if !l.CanInstallModules() {
panic(fmt.Errorf("InstallModules called on loader that cannot install modules"))
}
rootMod, diags := l.parser.LoadConfigDir(rootDir)
if rootMod == nil {
return diags
}
instDiags := l.installDescendentModules(rootMod, rootDir, upgrade, hooks)
diags = append(diags, instDiags...)
return diags
}
func (l *Loader) installDescendentModules(rootMod *configs.Module, rootDir string, upgrade bool, hooks InstallHooks) hcl.Diagnostics {
var diags hcl.Diagnostics
if hooks == nil {
// Use our no-op implementation as a placeholder
hooks = InstallHooksImpl{}
}
// Create a manifest record for the root module. This will be used if
// there are any relative-pathed modules in the root.
l.modules.manifest[""] = moduleRecord{
Key: "",
Dir: rootDir,
}
_, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(
func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
key := manifestKey(req.Path)
instPath := l.packageInstallPath(req.Path)
log.Printf("[DEBUG] Module installer: begin %s", key)
// First we'll check if we need to upgrade/replace an existing
// installed module, and delete it out of the way if so.
replace := upgrade
if !replace {
record, recorded := l.modules.manifest[key]
switch {
case !recorded:
log.Printf("[TRACE] %s is not yet installed", key)
replace = true
case record.SourceAddr != req.SourceAddr:
log.Printf("[TRACE] %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr)
replace = true
case record.Version != nil && !req.VersionConstraint.Required.Check(record.Version):
log.Printf("[TRACE] %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraint.Required)
replace = true
}
}
// If we _are_ planning to replace this module, then we'll remove
// it now so our installation code below won't conflict with any
// existing remnants.
if replace {
if _, recorded := l.modules.manifest[key]; recorded {
log.Printf("[TRACE] discarding previous record of %s prior to reinstall", key)
}
delete(l.modules.manifest, key)
// Deleting a module invalidates all of its descendent modules too.
keyPrefix := key + "."
for subKey := range l.modules.manifest {
if strings.HasPrefix(subKey, keyPrefix) {
if _, recorded := l.modules.manifest[subKey]; recorded {
log.Printf("[TRACE] also discarding downstream %s", subKey)
}
delete(l.modules.manifest, subKey)
}
}
}
record, recorded := l.modules.manifest[key]
if !recorded {
// Clean up any stale cache directory that might be present.
// If this is a local (relative) source then the dir will
// not exist, but we'll ignore that.
log.Printf("[TRACE] cleaning directory %s prior to install of %s", instPath, key)
err := l.modules.FS.RemoveAll(instPath)
if err != nil && !os.IsNotExist(err) {
log.Printf("[TRACE] failed to remove %s: %s", key, err)
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to remove local module cache",
Detail: fmt.Sprintf(
"Terraform tried to remove %s in order to reinstall this module, but encountered an error: %s",
instPath, err,
),
Subject: &req.CallRange,
})
return nil, nil, diags
}
} else {
// If this module is already recorded and its root directory
// exists then we will just load what's already there and
// keep our existing record.
info, err := l.modules.FS.Stat(record.Dir)
if err == nil && info.IsDir() {
mod, mDiags := l.parser.LoadConfigDir(record.Dir)
diags = append(diags, mDiags...)
log.Printf("[TRACE] Module installer: %s %s already installed in %s", key, record.Version, record.Dir)
return mod, record.Version, diags
}
}
// If we get down here then it's finally time to actually install
// the module. There are some variants to this process depending
// on what type of module source address we have.
switch {
case isLocalSourceAddr(req.SourceAddr):
log.Printf("[TRACE] %s has local path %q", key, req.SourceAddr)
mod, mDiags := l.installLocalModule(req, key, hooks)
diags = append(diags, mDiags...)
return mod, nil, diags
case isRegistrySourceAddr(req.SourceAddr):
addr, err := regsrc.ParseModuleSource(req.SourceAddr)
if err != nil {
// Should never happen because isRegistrySourceAddr already validated
panic(err)
}
log.Printf("[TRACE] %s is a registry module at %s", key, addr)
mod, v, mDiags := l.installRegistryModule(req, key, instPath, addr, hooks)
diags = append(diags, mDiags...)
return mod, v, diags
default:
log.Printf("[TRACE] %s address %q will be handled by go-getter", key, req.SourceAddr)
mod, mDiags := l.installGoGetterModule(req, key, instPath, hooks)
diags = append(diags, mDiags...)
return mod, nil, diags
}
},
))
diags = append(diags, cDiags...)
err := l.modules.writeModuleManifestSnapshot()
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to update module manifest",
Detail: fmt.Sprintf("Unable to write the module manifest file: %s", err),
})
}
return diags
}
// CanInstallModules returns true if InstallModules can be used with this
// loader.
//
// Loaders created with NewLoader can always install modules. Loaders created
// from plan files (where the configuration is embedded in the plan file itself)
// cannot install modules, because the plan file is read-only.
func (l *Loader) CanInstallModules() bool {
return l.modules.CanInstall
}
func (l *Loader) installLocalModule(req *configs.ModuleRequest, key string, hooks InstallHooks) (*configs.Module, hcl.Diagnostics) {
var diags hcl.Diagnostics
parentKey := manifestKey(req.Parent.Path)
parentRecord, recorded := l.modules.manifest[parentKey]
if !recorded {
// This is indicative of a bug rather than a user-actionable error
panic(fmt.Errorf("missing manifest record for parent module %s", parentKey))
}
if len(req.VersionConstraint.Required) != 0 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid version constraint",
Detail: "A version constraint cannot be applied to a module at a relative local path.",
Subject: &req.VersionConstraint.DeclRange,
})
}
// For local sources we don't actually need to modify the
// filesystem at all because the parent already wrote
// the files we need, and so we just load up what's already here.
newDir := filepath.Join(parentRecord.Dir, req.SourceAddr)
log.Printf("[TRACE] %s uses directory from parent: %s", key, newDir)
mod, mDiags := l.parser.LoadConfigDir(newDir)
if mod == nil {
// nil indicates missing or unreadable directory, so we'll
// discard the returned diags and return a more specific
// error message here.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unreadable module directory",
Detail: fmt.Sprintf("The directory %s could not be read.", newDir),
Subject: &req.SourceAddrRange,
})
} else {
diags = append(diags, mDiags...)
}
// Note the local location in our manifest.
l.modules.manifest[key] = moduleRecord{
Key: key,
Dir: newDir,
SourceAddr: req.SourceAddr,
}
log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir)
hooks.Install(key, nil, newDir)
return mod, diags
}
func (l *Loader) installRegistryModule(req *configs.ModuleRequest, key string, instPath string, addr *regsrc.Module, hooks InstallHooks) (*configs.Module, *version.Version, hcl.Diagnostics) {
var diags hcl.Diagnostics
hostname, err := addr.SvcHost()
if err != nil {
// If it looks like the user was trying to use punycode then we'll generate
// a specialized error for that case. We require the unicode form of
// hostname so that hostnames are always human-readable in configuration
// and punycode can't be used to hide a malicious module hostname.
if strings.HasPrefix(addr.RawHost.Raw, "xn--") {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid module registry hostname",
Detail: "The hostname portion of this source address is not an acceptable hostname. Internationalized domain names must be given in unicode form rather than ASCII (\"punycode\") form.",
Subject: &req.SourceAddrRange,
})
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid module registry hostname",
Detail: "The hostname portion of this source address is not a valid hostname.",
Subject: &req.SourceAddrRange,
})
}
return nil, nil, diags
}
reg := l.modules.Registry
log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname)
resp, err := reg.Versions(addr)
if err != nil {
if registry.IsModuleNotFound(err) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Module not found",
Detail: fmt.Sprintf("The specified module could not be found in the module registry at %s.", hostname),
Subject: &req.SourceAddrRange,
})
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Error accessing remote module registry",
Detail: fmt.Sprintf("Failed to retrieve available versions for this module from %s: %s.", hostname, err),
Subject: &req.SourceAddrRange,
})
}
return nil, nil, diags
}
// The response might contain information about dependencies to allow us
// to potentially optimize future requests, but we don't currently do that
// and so for now we'll just take the first item which is guaranteed to
// be the address we requested.
if len(resp.Modules) < 1 {
// Should never happen, but since this is a remote service that may
// be implemented by third-parties we will handle it gracefully.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid response from remote module registry",
Detail: fmt.Sprintf("The registry at %s returned an invalid response when Terraform requested available versions for this module.", hostname),
Subject: &req.SourceAddrRange,
})
return nil, nil, diags
}
modMeta := resp.Modules[0]
var latestMatch *version.Version
var latestVersion *version.Version
for _, mv := range modMeta.Versions {
v, err := version.NewVersion(mv.Version)
if err != nil {
// Should never happen if the registry server is compliant with
// the protocol, but we'll warn if not to assist someone who
// might be developing a module registry server.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagWarning,
Summary: "Invalid response from remote module registry",
Detail: fmt.Sprintf("The registry at %s returned an invalid version string %q for this module, which Terraform ignored.", hostname, mv.Version),
Subject: &req.SourceAddrRange,
})
continue
}
// If we've found a pre-release version then we'll ignore it unless
// it was exactly requested.
if v.Prerelease() != "" && req.VersionConstraint.Required.String() != v.String() {
log.Printf("[TRACE] %s ignoring %s because it is a pre-release and was not requested exactly", key, v)
continue
}
if latestVersion == nil || v.GreaterThan(latestVersion) {
latestVersion = v
}
if req.VersionConstraint.Required.Check(v) {
if latestMatch == nil || v.GreaterThan(latestMatch) {
latestMatch = v
}
}
}
if latestVersion == nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Module has no versions",
Detail: fmt.Sprintf("The specified module does not have any available versions."),
Subject: &req.SourceAddrRange,
})
return nil, nil, diags
}
if latestMatch == nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unresolvable module version constraint",
Detail: fmt.Sprintf("There is no available version of %q that matches the given version constraint. The newest available version is %s.", addr, latestVersion),
Subject: &req.VersionConstraint.DeclRange,
})
return nil, nil, diags
}
// Report up to the caller that we're about to start downloading.
packageAddr, _ := splitAddrSubdir(req.SourceAddr)
hooks.Download(key, packageAddr, latestMatch)
// If we manage to get down here then we've found a suitable version to
// install, so we need to ask the registry where we should download it from.
// The response to this is a go-getter-style address string.
dlAddr, err := reg.Location(addr, latestMatch.String())
if err != nil {
log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err)
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid response from remote module registry",
Detail: fmt.Sprintf("The remote registry at %s failed to return a download URL for %s %s.", hostname, addr, latestMatch),
Subject: &req.VersionConstraint.DeclRange,
})
return nil, nil, diags
}
log.Printf("[TRACE] %s %s %s is available at %q", key, addr, latestMatch, dlAddr)
modDir, err := getWithGoGetter(instPath, dlAddr)
if err != nil {
// Errors returned by go-getter have very inconsistent quality as
// end-user error messages, but for now we're accepting that because
// we have no way to recognize any specific errors to improve them
// and masking the error entirely would hide valuable diagnostic
// information from the user.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to download module",
Detail: fmt.Sprintf("Error attempting to download module source code from %q: %s", dlAddr, err),
Subject: &req.CallRange,
})
return nil, nil, diags
}
log.Printf("[TRACE] %s %q was downloaded to %s", key, dlAddr, modDir)
if addr.RawSubmodule != "" {
// Append the user's requested subdirectory to any subdirectory that
// was implied by any of the nested layers we expanded within go-getter.
modDir = filepath.Join(modDir, addr.RawSubmodule)
}
log.Printf("[TRACE] %s should now be at %s", key, modDir)
// Finally we are ready to try actually loading the module.
mod, mDiags := l.parser.LoadConfigDir(modDir)
if mod == nil {
// nil indicates missing or unreadable directory, so we'll
// discard the returned diags and return a more specific
// error message here. For registry modules this actually
// indicates a bug in the code above, since it's not the
// user's responsibility to create the directory in this case.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unreadable module directory",
Detail: fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir),
Subject: &req.CallRange,
})
} else {
diags = append(diags, mDiags...)
}
// Note the local location in our manifest.
l.modules.manifest[key] = moduleRecord{
Key: key,
Version: latestMatch,
Dir: modDir,
SourceAddr: req.SourceAddr,
}
log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir)
hooks.Install(key, latestMatch, modDir)
return mod, latestMatch, diags
}
func (l *Loader) installGoGetterModule(req *configs.ModuleRequest, key string, instPath string, hooks InstallHooks) (*configs.Module, hcl.Diagnostics) {
var diags hcl.Diagnostics
// Report up to the caller that we're about to start downloading.
packageAddr, _ := splitAddrSubdir(req.SourceAddr)
hooks.Download(key, packageAddr, nil)
modDir, err := getWithGoGetter(instPath, req.SourceAddr)
if err != nil {
// Errors returned by go-getter have very inconsistent quality as
// end-user error messages, but for now we're accepting that because
// we have no way to recognize any specific errors to improve them
// and masking the error entirely would hide valuable diagnostic
// information from the user.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to download module",
Detail: fmt.Sprintf("Error attempting to download module source code from %q: %s", packageAddr, err),
Subject: &req.SourceAddrRange,
})
return nil, diags
}
log.Printf("[TRACE] %s %q was downloaded to %s", key, req.SourceAddr, modDir)
mod, mDiags := l.parser.LoadConfigDir(modDir)
if mod == nil {
// nil indicates missing or unreadable directory, so we'll
// discard the returned diags and return a more specific
// error message here. For go-getter modules this actually
// indicates a bug in the code above, since it's not the
// user's responsibility to create the directory in this case.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unreadable module directory",
Detail: fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir),
Subject: &req.CallRange,
})
} else {
diags = append(diags, mDiags...)
}
// Note the local location in our manifest.
l.modules.manifest[key] = moduleRecord{
Key: key,
Dir: modDir,
SourceAddr: req.SourceAddr,
}
log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir)
hooks.Install(key, nil, modDir)
return mod, diags
}
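// packageInstallPath returns the local directory into which a remote module
// package for the given module path should be installed. For example, with
// the conventional modules directory ".terraform/modules", a module path of
// ["acctest_root"] yields ".terraform/modules/acctest_root", matching the
// LocalPath values expected by the installer tests later in this change.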
func (l *Loader) packageInstallPath(modulePath []string) string {
return filepath.Join(l.modules.Dir, strings.Join(modulePath, "."))
}

View File

@ -0,0 +1,34 @@
package configload
import version "github.com/hashicorp/go-version"
// InstallHooks is an interface used to provide notifications about the
// installation process being orchestrated by InstallModules.
//
// This interface may have new methods added in future, so implementers should
// embed InstallHooksImpl to get no-op implementations of any unimplemented
// methods.
type InstallHooks interface {
// Download is called for modules that are retrieved from a remote source
// before that download begins, to allow a caller to give feedback
// on progress through a possibly-long sequence of downloads.
Download(moduleAddr, packageAddr string, version *version.Version)
// Install is called for each module that is installed, even if it did
// not need to be downloaded from a remote source.
Install(moduleAddr string, version *version.Version, localPath string)
}
// InstallHooksImpl is a do-nothing implementation of InstallHooks that
// can be embedded in another implementation struct to allow only partial
// implementation of the interface.
type InstallHooksImpl struct {
}
func (h InstallHooksImpl) Download(moduleAddr, packageAddr string, version *version.Version) {
}
func (h InstallHooksImpl) Install(moduleAddr string, version *version.Version, localPath string) {
}
var _ InstallHooks = InstallHooksImpl{}
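A minimal sketch of the embedding pattern described in the InstallHooks comment above; the exampleHooks type and its log output are hypothetical, and the snippet assumes it lives in the same configload package.
package configload

import (
	"log"

	version "github.com/hashicorp/go-version"
)

// exampleHooks only cares about download progress, so it embeds
// InstallHooksImpl to pick up a no-op Install and stay compatible if the
// InstallHooks interface grows new methods later.
type exampleHooks struct {
	InstallHooksImpl
}

func (h exampleHooks) Download(moduleAddr, packageAddr string, v *version.Version) {
	log.Printf("downloading %s from %s (version %v)", moduleAddr, packageAddr, v)
}

var _ InstallHooks = exampleHooks{}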

View File

@ -0,0 +1,323 @@
package configload
import (
"os"
"path/filepath"
"strings"
"testing"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/terraform/configs"
)
func TestLoaderInstallModules_local(t *testing.T) {
fixtureDir := filepath.Clean("test-fixtures/local-modules")
loader, done := tempChdirLoader(t, fixtureDir)
defer done()
hooks := &testInstallHooks{}
diags := loader.InstallModules(".", false, hooks)
assertNoDiagnostics(t, diags)
wantCalls := []testInstallHookCall{
{
Name: "Install",
ModuleAddr: "child_a",
PackageAddr: "",
LocalPath: "child_a",
},
{
Name: "Install",
ModuleAddr: "child_a.child_b",
PackageAddr: "",
LocalPath: "child_a/child_b",
},
}
if assertResultDeepEqual(t, hooks.Calls, wantCalls) {
return
}
// Make sure the configuration is loadable now.
// (This ensures that correct information is recorded in the manifest.)
config, loadDiags := loader.LoadConfig(".")
assertNoDiagnostics(t, loadDiags)
wantTraces := map[string]string{
"": "in root module",
"child_a": "in child_a module",
"child_a.child_b": "in child_b module",
}
gotTraces := map[string]string{}
config.DeepEach(func(c *configs.Config) {
path := strings.Join(c.Path, ".")
if c.Module.Variables["v"] == nil {
gotTraces[path] = "<missing>"
return
}
varDesc := c.Module.Variables["v"].Description
gotTraces[path] = varDesc
})
assertResultDeepEqual(t, gotTraces, wantTraces)
}
func TestLoaderInstallModules_registry(t *testing.T) {
if os.Getenv("TF_ACC") == "" {
t.Skip("this test accesses registry.terraform.io and github.com; set TF_ACC=1 to run it")
}
fixtureDir := filepath.Clean("test-fixtures/registry-modules")
loader, done := tempChdirLoader(t, fixtureDir)
defer done()
hooks := &testInstallHooks{}
diags := loader.InstallModules(".", false, hooks)
assertNoDiagnostics(t, diags)
v := version.Must(version.NewVersion("0.0.1"))
wantCalls := []testInstallHookCall{
// the configuration builder visits each level of calls in lexicographical
// order by name, so the following list is kept in the same order.
// acctest_child_a accesses //modules/child_a directly
{
Name: "Download",
ModuleAddr: "acctest_child_a",
PackageAddr: "hashicorp/module-installer-acctest/aws", // intentionally excludes the subdir because we're downloading the whole package here
Version: v,
},
{
Name: "Install",
ModuleAddr: "acctest_child_a",
Version: v,
LocalPath: ".terraform/modules/acctest_child_a/hashicorp-terraform-aws-module-installer-acctest-853d038/modules/child_a",
},
// acctest_child_a.child_b
// (no download because it's a relative path inside acctest_child_a)
{
Name: "Install",
ModuleAddr: "acctest_child_a.child_b",
LocalPath: ".terraform/modules/acctest_child_a/hashicorp-terraform-aws-module-installer-acctest-853d038/modules/child_b",
},
// acctest_child_b accesses //modules/child_b directly
{
Name: "Download",
ModuleAddr: "acctest_child_b",
PackageAddr: "hashicorp/module-installer-acctest/aws", // intentionally excludes the subdir because we're downloading the whole package here
Version: v,
},
{
Name: "Install",
ModuleAddr: "acctest_child_b",
Version: v,
LocalPath: ".terraform/modules/acctest_child_b/hashicorp-terraform-aws-module-installer-acctest-853d038/modules/child_b",
},
// acctest_root
{
Name: "Download",
ModuleAddr: "acctest_root",
PackageAddr: "hashicorp/module-installer-acctest/aws",
Version: v,
},
{
Name: "Install",
ModuleAddr: "acctest_root",
Version: v,
LocalPath: ".terraform/modules/acctest_root/hashicorp-terraform-aws-module-installer-acctest-853d038",
},
// acctest_root.child_a
// (no download because it's a relative path inside acctest_root)
{
Name: "Install",
ModuleAddr: "acctest_root.child_a",
LocalPath: ".terraform/modules/acctest_root/hashicorp-terraform-aws-module-installer-acctest-853d038/modules/child_a",
},
// acctest_root.child_a.child_b
// (no download because it's a relative path inside acctest_root, via acctest_root.child_a)
{
Name: "Install",
ModuleAddr: "acctest_root.child_a.child_b",
LocalPath: ".terraform/modules/acctest_root/hashicorp-terraform-aws-module-installer-acctest-853d038/modules/child_b",
},
}
if assertResultDeepEqual(t, hooks.Calls, wantCalls) {
return
}
// Make sure the configuration is loadable now.
// (This ensures that correct information is recorded in the manifest.)
config, loadDiags := loader.LoadConfig(".")
assertNoDiagnostics(t, loadDiags)
wantTraces := map[string]string{
"": "in local caller for registry-modules",
"acctest_root": "in root module",
"acctest_root.child_a": "in child_a module",
"acctest_root.child_a.child_b": "in child_b module",
"acctest_child_a": "in child_a module",
"acctest_child_a.child_b": "in child_b module",
"acctest_child_b": "in child_b module",
}
gotTraces := map[string]string{}
config.DeepEach(func(c *configs.Config) {
path := strings.Join(c.Path, ".")
if c.Module.Variables["v"] == nil {
gotTraces[path] = "<missing>"
return
}
varDesc := c.Module.Variables["v"].Description
gotTraces[path] = varDesc
})
assertResultDeepEqual(t, gotTraces, wantTraces)
}
func TestLoaderInstallModules_goGetter(t *testing.T) {
if os.Getenv("TF_ACC") == "" {
t.Skip("this test accesses github.com; set TF_ACC=1 to run it")
}
fixtureDir := filepath.Clean("test-fixtures/go-getter-modules")
loader, done := tempChdirLoader(t, fixtureDir)
defer done()
hooks := &testInstallHooks{}
diags := loader.InstallModules(".", false, hooks)
assertNoDiagnostics(t, diags)
wantCalls := []testInstallHookCall{
// the configuration builder visits each level of calls in lexicographical
// order by name, so the following list is kept in the same order.
// acctest_child_a accesses //modules/child_a directly
{
Name: "Download",
ModuleAddr: "acctest_child_a",
PackageAddr: "github.com/hashicorp/terraform-aws-module-installer-acctest?ref=v0.0.1", // intentionally excludes the subdir because we're downloading the whole repo here
},
{
Name: "Install",
ModuleAddr: "acctest_child_a",
LocalPath: ".terraform/modules/acctest_child_a/modules/child_a",
},
// acctest_child_a.child_b
// (no download because it's a relative path inside acctest_child_a)
{
Name: "Install",
ModuleAddr: "acctest_child_a.child_b",
LocalPath: ".terraform/modules/acctest_child_a/modules/child_b",
},
// acctest_child_b accesses //modules/child_b directly
{
Name: "Download",
ModuleAddr: "acctest_child_b",
PackageAddr: "github.com/hashicorp/terraform-aws-module-installer-acctest?ref=v0.0.1", // intentionally excludes the subdir because we're downloading the whole package here
},
{
Name: "Install",
ModuleAddr: "acctest_child_b",
LocalPath: ".terraform/modules/acctest_child_b/modules/child_b",
},
// acctest_root
{
Name: "Download",
ModuleAddr: "acctest_root",
PackageAddr: "github.com/hashicorp/terraform-aws-module-installer-acctest?ref=v0.0.1",
},
{
Name: "Install",
ModuleAddr: "acctest_root",
LocalPath: ".terraform/modules/acctest_root",
},
// acctest_root.child_a
// (no download because it's a relative path inside acctest_root)
{
Name: "Install",
ModuleAddr: "acctest_root.child_a",
LocalPath: ".terraform/modules/acctest_root/modules/child_a",
},
// acctest_root.child_a.child_b
// (no download because it's a relative path inside acctest_root, via acctest_root.child_a)
{
Name: "Install",
ModuleAddr: "acctest_root.child_a.child_b",
LocalPath: ".terraform/modules/acctest_root/modules/child_b",
},
}
if assertResultDeepEqual(t, hooks.Calls, wantCalls) {
return
}
// Make sure the configuration is loadable now.
// (This ensures that correct information is recorded in the manifest.)
config, loadDiags := loader.LoadConfig(".")
assertNoDiagnostics(t, loadDiags)
wantTraces := map[string]string{
"": "in local caller for go-getter-modules",
"acctest_root": "in root module",
"acctest_root.child_a": "in child_a module",
"acctest_root.child_a.child_b": "in child_b module",
"acctest_child_a": "in child_a module",
"acctest_child_a.child_b": "in child_b module",
"acctest_child_b": "in child_b module",
}
gotTraces := map[string]string{}
config.DeepEach(func(c *configs.Config) {
path := strings.Join(c.Path, ".")
if c.Module.Variables["v"] == nil {
gotTraces[path] = "<missing>"
return
}
varDesc := c.Module.Variables["v"].Description
gotTraces[path] = varDesc
})
assertResultDeepEqual(t, gotTraces, wantTraces)
}
type testInstallHooks struct {
Calls []testInstallHookCall
}
type testInstallHookCall struct {
Name string
ModuleAddr string
PackageAddr string
Version *version.Version
LocalPath string
}
func (h *testInstallHooks) Download(moduleAddr, packageAddr string, version *version.Version) {
h.Calls = append(h.Calls, testInstallHookCall{
Name: "Download",
ModuleAddr: moduleAddr,
PackageAddr: packageAddr,
Version: version,
})
}
func (h *testInstallHooks) Install(moduleAddr string, version *version.Version, localPath string) {
h.Calls = append(h.Calls, testInstallHookCall{
Name: "Install",
ModuleAddr: moduleAddr,
Version: version,
LocalPath: localPath,
})
}

View File

@ -0,0 +1,97 @@
package configload
import (
"fmt"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/configs"
)
// LoadConfig reads the Terraform module in the given directory and uses it as the
// root module to build the static module tree that represents a configuration,
// assuming that all required descendent modules have already been installed.
//
// If error diagnostics are returned, the returned configuration may be either
// nil or incomplete. In the latter case, cautious static analysis is possible
// in spite of the errors.
//
// LoadConfig performs the basic syntax and uniqueness validations that are
// required to process the individual modules, and also detects any
// inconsistencies between the configuration and the installed-module
// manifest so that the user can be prompted to re-run "terraform init"
// when they disagree.
func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) {
rootMod, diags := l.parser.LoadConfigDir(rootDir)
if rootMod == nil {
return nil, diags
}
cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad))
diags = append(diags, cDiags...)
return cfg, diags
}
// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that
// are presumed to have already been installed. A different function
// (moduleWalkerInstall) is used for installation.
func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
// Since we're just loading here, we expect that all referenced modules
// will be already installed and described in our manifest. However, we
// do verify that the manifest and the configuration are in agreement
// so that we can prompt the user to run "terraform init" if not.
key := manifestKey(req.Path)
record, exists := l.modules.manifest[key]
if !exists {
return nil, nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Module not installed",
Detail: "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.",
Subject: &req.CallRange,
},
}
}
var diags hcl.Diagnostics
// Check for inconsistencies between manifest and config
if req.SourceAddr != record.SourceAddr {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Module source has changed",
Detail: "The source address was changed since this module was installed. Run \"terraform init\" to install all modules required by this configuration.",
Subject: &req.SourceAddrRange,
})
}
if !req.VersionConstraint.Required.Check(record.Version) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Module version requirements have changed",
Detail: fmt.Sprintf(
"The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.",
record.Version,
),
Subject: &req.SourceAddrRange,
})
}
mod, mDiags := l.parser.LoadConfigDir(record.Dir)
diags = append(diags, mDiags...)
if mod == nil {
// nil specifically indicates that the directory does not exist or
// cannot be read, so in this case we'll discard any generic diagnostics
// returned from LoadConfigDir and produce our own context-sensitive
// error message.
return nil, nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Module not installed",
Detail: fmt.Sprintf("This module's local cache directory %s could not be read. Run \"terraform init\" to install all modules required by this configuration.", record.Dir),
Subject: &req.CallRange,
},
}
}
return mod, record.Version, diags
}

View File

@ -0,0 +1,60 @@
package configload
import (
"path/filepath"
"reflect"
"sort"
"strings"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/configs"
)
func TestLoaderLoadConfig_okay(t *testing.T) {
fixtureDir := filepath.Clean("test-fixtures/already-installed")
loader, err := NewLoader(&Config{
ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"),
})
if err != nil {
t.Fatalf("unexpected error from NewLoader: %s", err)
}
cfg, diags := loader.LoadConfig(fixtureDir)
assertNoDiagnostics(t, diags)
if cfg == nil {
t.Fatalf("config is nil; want non-nil")
}
var gotPaths []string
cfg.DeepEach(func(c *configs.Config) {
gotPaths = append(gotPaths, strings.Join(c.Path, "."))
})
sort.Strings(gotPaths)
wantPaths := []string{
"", // root module
"child_a",
"child_a.child_c",
"child_b",
"child_b.child_d",
}
if !reflect.DeepEqual(gotPaths, wantPaths) {
t.Fatalf("wrong module paths\ngot: %swant %s", spew.Sdump(gotPaths), spew.Sdump(wantPaths))
}
t.Run("child_a.child_c output", func(t *testing.T) {
output := cfg.Children["child_a"].Children["child_c"].Module.Outputs["hello"]
got, diags := output.Expr.Value(nil)
assertNoDiagnostics(t, diags)
assertResultCtyEqual(t, got, cty.StringVal("Hello from child_c"))
})
t.Run("child_b.child_d output", func(t *testing.T) {
output := cfg.Children["child_b"].Children["child_d"].Module.Outputs["hello"]
got, diags := output.Expr.Value(nil)
assertNoDiagnostics(t, diags)
assertResultCtyEqual(t, got, cty.StringVal("Hello from child_d"))
})
}

View File

@ -0,0 +1,141 @@
package configload
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/go-test/deep"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
)
// tempChdir copies the contents of the given directory to a temporary
// directory and changes the test process's current working directory to
// point to that directory. Also returned is a function that should be
// called at the end of the test (e.g. via "defer") to restore the previous
// working directory.
//
// Tests using this helper cannot safely be run in parallel with other tests.
func tempChdir(t *testing.T, sourceDir string) (string, func()) {
t.Helper()
tmpDir, err := ioutil.TempDir("", "terraform-configload")
if err != nil {
t.Fatalf("failed to create temporary directory: %s", err)
return "", nil
}
if err := copyDir(tmpDir, sourceDir); err != nil {
t.Fatalf("failed to copy fixture to temporary directory: %s", err)
return "", nil
}
oldDir, err := os.Getwd()
if err != nil {
t.Fatalf("failed to determine current working directory: %s", err)
return "", nil
}
err = os.Chdir(tmpDir)
if err != nil {
t.Fatalf("failed to switch to temp dir %s: %s", tmpDir, err)
return "", nil
}
t.Logf("tempChdir switched to %s after copying from %s", tmpDir, sourceDir)
return tmpDir, func() {
err := os.Chdir(oldDir)
if err != nil {
panic(fmt.Errorf("failed to restore previous working directory %s: %s", oldDir, err))
}
if os.Getenv("TF_CONFIGLOAD_TEST_KEEP_TMP") == "" {
os.RemoveAll(tmpDir)
}
}
}
// tempChdirLoader is a wrapper around tempChdir that also returns a Loader
// whose modules directory is at the conventional location within the
// created temporary directory.
func tempChdirLoader(t *testing.T, sourceDir string) (*Loader, func()) {
t.Helper()
_, done := tempChdir(t, sourceDir)
modulesDir := filepath.Clean(".terraform/modules")
err := os.MkdirAll(modulesDir, os.ModePerm)
if err != nil {
done() // undo the chdir in tempChdir so we can safely run other tests
t.Fatalf("failed to create modules directory: %s", err)
return nil, nil
}
loader, err := NewLoader(&Config{
ModulesDir: modulesDir,
})
if err != nil {
done() // undo the chdir in tempChdir so we can safely run other tests
t.Fatalf("failed to create loader: %s", err)
return nil, nil
}
return loader, done
}
func assertNoDiagnostics(t *testing.T, diags hcl.Diagnostics) bool {
t.Helper()
return assertDiagnosticCount(t, diags, 0)
}
func assertDiagnosticCount(t *testing.T, diags hcl.Diagnostics, want int) bool {
t.Helper()
if len(diags) != want {
t.Errorf("wrong number of diagnostics %d; want %d", len(diags), want)
for _, diag := range diags {
t.Logf("- %s", diag)
}
return true
}
return false
}
func assertDiagnosticSummary(t *testing.T, diags hcl.Diagnostics, want string) bool {
t.Helper()
for _, diag := range diags {
if diag.Summary == want {
return false
}
}
t.Errorf("missing diagnostic summary %q", want)
for _, diag := range diags {
t.Logf("- %s", diag)
}
return true
}
func assertResultDeepEqual(t *testing.T, got, want interface{}) bool {
t.Helper()
if diff := deep.Equal(got, want); diff != nil {
for _, problem := range diff {
t.Errorf("%s", problem)
}
return true
}
return false
}
func assertResultCtyEqual(t *testing.T, got, want cty.Value) bool {
t.Helper()
if !got.RawEquals(want) {
t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, want)
return true
}
return false
}

View File

@ -0,0 +1,132 @@
package configload
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
version "github.com/hashicorp/go-version"
)
// moduleRecord represents some metadata about an installed module, as part
// of a moduleManifest.
type moduleRecord struct {
// Key is a unique identifier for this particular module, based on its
// position within the static module tree.
Key string `json:"Key"`
// SourceAddr is the source address given for this module in configuration.
// This is used only to detect if the source was changed in configuration
// since the module was last installed, which means that the installer
// must re-install it.
SourceAddr string `json:"Source"`
// Version is the exact version of the module, which results from parsing
// VersionStr. nil for un-versioned modules.
Version *version.Version `json:"-"`
// VersionStr is the version specifier string. This is used only for
// serialization in snapshots and should not be accessed or updated
// by any other codepaths; use "Version" instead.
VersionStr string `json:"Version,omitempty"`
// Dir is the path to the local directory where the module is installed.
Dir string `json:"Dir"`
}
// moduleManifest is a map used to keep track of the filesystem locations
// and other metadata about installed modules.
//
// The configuration loader refers to this, while the module installer updates
// it to reflect any changes to the installed modules.
type moduleManifest map[string]moduleRecord
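// manifestKey produces the map key used in moduleManifest for a module at
// the given path in the static module tree. For example, the path
// ["child_a", "child_c"] yields the key "child_a.child_c", which is also the
// form used for the "Key" field in the JSON snapshot format below.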
func manifestKey(path []string) string {
return strings.Join(path, ".")
}
// manifestSnapshotFile is an internal struct used only to assist in our JSON
// serialization of manifest snapshots. It should not be used for any other
// purposes.
type manifestSnapshotFile struct {
Records []moduleRecord `json:"Modules"`
}
const manifestFilename = "modules.json"
func (m *moduleMgr) manifestSnapshotPath() string {
return filepath.Join(m.Dir, manifestFilename)
}
// readModuleManifestSnapshot loads a manifest snapshot from the filesystem.
func (m *moduleMgr) readModuleManifestSnapshot() error {
src, err := m.FS.ReadFile(m.manifestSnapshotPath())
if err != nil {
if os.IsNotExist(err) {
// We'll treat a missing file as an empty manifest
m.manifest = make(moduleManifest)
return nil
}
return err
}
if len(src) == 0 {
// This should never happen, but we'll tolerate it as if it were
// a valid empty JSON object.
m.manifest = make(moduleManifest)
return nil
}
var read manifestSnapshotFile
err = json.Unmarshal(src, &read)
new := make(moduleManifest)
for _, record := range read.Records {
if record.VersionStr != "" {
record.Version, err = version.NewVersion(record.VersionStr)
if err != nil {
return fmt.Errorf("invalid version %q for %s: %s", record.VersionStr, record.Key, err)
}
}
if _, exists := new[record.Key]; exists {
// This should never happen in any valid file, so we'll catch it
// and report it to avoid confusing/undefined behavior if the
// snapshot file was edited incorrectly outside of Terraform.
return fmt.Errorf("snapshot file contains two records for path %s", record.Key)
}
new[record.Key] = record
}
m.manifest = new
return nil
}
// writeModuleManifestSnapshot writes a snapshot of the current manifest
// to the filesystem.
//
// The caller must guarantee no concurrent modifications of the manifest for
// the duration of a call to this function, or the behavior is undefined.
func (m *moduleMgr) writeModuleManifestSnapshot() error {
var write manifestSnapshotFile
for _, record := range m.manifest {
// Make sure VersionStr is in sync with Version, since we encourage
// callers to manipulate Version and ignore VersionStr.
if record.Version != nil {
record.VersionStr = record.Version.String()
} else {
record.VersionStr = ""
}
write.Records = append(write.Records, record)
}
src, err := json.Marshal(write)
if err != nil {
return err
}
return m.FS.WriteFile(m.manifestSnapshotPath(), src, os.ModePerm)
}

View File

@ -0,0 +1,42 @@
package configload
import (
"github.com/hashicorp/terraform/registry"
"github.com/hashicorp/terraform/svchost/auth"
"github.com/hashicorp/terraform/svchost/disco"
"github.com/spf13/afero"
)
type moduleMgr struct {
FS afero.Afero
// CanInstall is true for a module manager that can support installation.
//
// This must be set only if FS is an afero.OsFs, because the installer
// (which uses go-getter) is not aware of the virtual filesystem
// abstraction and will always write into the "real" filesystem.
CanInstall bool
// Dir is the path where descendent modules are (or will be) installed.
Dir string
// Services is a service discovery client that will be used to find
// remote module registry endpoints. This object may be pre-loaded with
// cached discovery information.
Services *disco.Disco
// Creds provides optional credentials for communicating with service hosts.
Creds auth.CredentialsSource
// Registry is a client for the module registry protocol, which is used
// when a module is requested from a registry source.
Registry *registry.Client
// manifest tracks the currently-installed modules for this manager.
//
// The loader may read this. Only the installer may write to it, and
// after a set of updates are completed the installer must call
// writeModuleManifestSnapshot to persist a snapshot of the manifest
// to disk for use on subsequent runs.
manifest moduleManifest
}

View File

@ -0,0 +1,45 @@
package configload
import (
"strings"
"github.com/hashicorp/go-getter"
"github.com/hashicorp/terraform/registry/regsrc"
)
var localSourcePrefixes = []string{
"./",
"../",
".\\",
"..\\",
}
func isLocalSourceAddr(addr string) bool {
for _, prefix := range localSourcePrefixes {
if strings.HasPrefix(addr, prefix) {
return true
}
}
return false
}
func isRegistrySourceAddr(addr string) bool {
_, err := regsrc.ParseModuleSource(addr)
return err == nil
}
// splitAddrSubdir splits the given address (which is assumed to be a
// registry address or go-getter-style address) into a package portion
// and a sub-directory portion.
//
// The package portion defines what should be downloaded and then the
// sub-directory portion, if present, specifies a sub-directory within
// the downloaded object (an archive, VCS repository, etc) that contains
// the module's configuration files.
//
// The subDir portion will be returned as empty if no subdir separator
// ("//") is present in the address.
func splitAddrSubdir(addr string) (packageAddr, subDir string) {
return getter.SourceDirSubdir(addr)
}
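A minimal usage sketch of the "//" separator handling described above; exampleSplit is hypothetical, and the printed values mirror the package/subdir expectations in the installer acceptance tests earlier in this change.
package configload

import "fmt"

func exampleSplit() {
	pkg, sub := splitAddrSubdir("github.com/hashicorp/terraform-aws-module-installer-acctest//modules/child_a?ref=v0.0.1")
	fmt.Println(pkg) // github.com/hashicorp/terraform-aws-module-installer-acctest?ref=v0.0.1
	fmt.Println(sub) // modules/child_a
}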

View File

@ -0,0 +1,4 @@
module "child_c" {
source = "./child_c"
}

View File

@ -0,0 +1,4 @@
output "hello" {
value = "Hello from child_c"
}

View File

@ -0,0 +1,4 @@
output "hello" {
value = "Hello from child_d"
}

View File

@ -0,0 +1,5 @@
module "child_d" {
source = "example.com/foo/bar_d/baz"
# Intentionally no version here
}

View File

@ -0,0 +1 @@
{"Modules":[{"Key":"","Source":"","Dir":"test-fixtures/already-installed"},{"Key":"child_a","Source":"example.com/foo/bar_a/baz","Version":"1.0.1","Dir":"test-fixtures/already-installed/.terraform/modules/child_a"},{"Key":"child_b","Source":"example.com/foo/bar_b/baz","Version":"1.0.0","Dir":"test-fixtures/already-installed/.terraform/modules/child_b"},{"Key":"child_a.child_c","Source":"./child_c","Dir":"test-fixtures/already-installed/.terraform/modules/child_a/child_c"},{"Key":"child_b.child_d","Source":"example.com/foo/bar_d/baz","Version":"1.2.0","Dir":"test-fixtures/already-installed/.terraform/modules/child_b.child_d"}]}

View File

@ -0,0 +1,10 @@
module "child_a" {
source = "example.com/foo/bar_a/baz"
version = ">= 1.0.0"
}
module "child_b" {
source = "example.com/foo/bar_b/baz"
version = ">= 1.0.0"
}

View File

View File

@ -0,0 +1 @@
.terraform/*

View File

@ -0,0 +1,21 @@
# This fixture depends on a github repo at:
# https://github.com/hashicorp/terraform-aws-module-installer-acctest
# ...and expects its v0.0.1 tag to be pointing at the following commit:
# d676ab2559d4e0621d59e3c3c4cbb33958ac4608
variable "v" {
description = "in local caller for go-getter-modules"
default = ""
}
module "acctest_root" {
source = "github.com/hashicorp/terraform-aws-module-installer-acctest?ref=v0.0.1"
}
module "acctest_child_a" {
source = "github.com/hashicorp/terraform-aws-module-installer-acctest//modules/child_a?ref=v0.0.1"
}
module "acctest_child_b" {
source = "github.com/hashicorp/terraform-aws-module-installer-acctest//modules/child_b?ref=v0.0.1"
}

View File

@ -0,0 +1,9 @@
variable "v" {
description = "in child_a module"
default = ""
}
module "child_b" {
source = "./child_b"
}

View File

@ -0,0 +1,9 @@
variable "v" {
description = "in child_b module"
default = ""
}
output "hello" {
value = "Hello from child_b!"
}

View File

@ -0,0 +1,9 @@
variable "v" {
description = "in root module"
default = ""
}
module "child_a" {
source = "./child_a"
}

View File

@ -0,0 +1 @@
.terraform/*

View File

@ -0,0 +1,33 @@
# This fixture indirectly depends on a github repo at:
# https://github.com/hashicorp/terraform-aws-module-installer-acctest
# ...and expects its v0.0.1 tag to be pointing at the following commit:
# d676ab2559d4e0621d59e3c3c4cbb33958ac4608
#
# This repository is accessed indirectly via:
# https://registry.terraform.io/modules/hashicorp/module-installer-acctest/aws/0.0.1
#
# Since the tag's id is included in a downloaded archive, it is expected to
# have the following id:
# 853d03855b3290a3ca491d4c3a7684572dd42237
# (this particular assumption is encoded in the tests that use this fixture)
variable "v" {
description = "in local caller for registry-modules"
default = ""
}
module "acctest_root" {
source = "hashicorp/module-installer-acctest/aws"
version = "0.0.1"
}
module "acctest_child_a" {
source = "hashicorp/module-installer-acctest/aws//modules/child_a"
version = "0.0.1"
}
module "acctest_child_b" {
source = "hashicorp/module-installer-acctest/aws//modules/child_b"
version = "0.0.1"
}

23
configs/depends_on.go Normal file
View File

@ -0,0 +1,23 @@
package configs
import (
"github.com/hashicorp/hcl2/hcl"
)
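// decodeDependsOn interprets a depends_on attribute as a list of references,
// e.g. depends_on = [aws_instance.example], tolerating the legacy quoted
// string form by passing each element through the string-traversal shim.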
func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) {
var ret []hcl.Traversal
exprs, diags := hcl.ExprList(attr.Expr)
for _, expr := range exprs {
expr, shimDiags := shimTraversalInString(expr, false)
diags = append(diags, shimDiags...)
traversal, travDiags := hcl.AbsTraversalForExpr(expr)
diags = append(diags, travDiags...)
if len(traversal) != 0 {
ret = append(ret, traversal)
}
}
return ret, diags
}

19
configs/doc.go Normal file
View File

@ -0,0 +1,19 @@
// Package configs contains types that represent Terraform configurations and
// the different elements thereof.
//
// The functionality in this package can be used for some static analyses of
// Terraform configurations, but this package generally exposes representations
// of the configuration source code rather than the result of evaluating these
// objects. The sibling package "lang" deals with evaluation of structures
// and expressions in the configuration.
//
// Due to its close relationship with HCL, this package makes frequent use
// of types from the HCL API, including raw HCL diagnostic messages. Such
// diagnostics can be converted into Terraform-flavored diagnostics, if needed,
// using functions in the sibling package tfdiags.
//
// The Parser type is the main entry-point into this package. The LoadConfigDir
// method can be used to load a single module directory, and then a full
// configuration (including any descendent modules) can be produced using
// the top-level BuildConfig method.
package configs

376
configs/module.go Normal file
View File

@ -0,0 +1,376 @@
package configs
import (
"fmt"
"github.com/hashicorp/hcl2/hcl"
)
// Module is a container for a set of configuration constructs that are
// evaluated within a common namespace.
type Module struct {
CoreVersionConstraints []VersionConstraint
Backend *Backend
ProviderConfigs map[string]*Provider
ProviderRequirements map[string][]VersionConstraint
Variables map[string]*Variable
Locals map[string]*Local
Outputs map[string]*Output
ModuleCalls map[string]*ModuleCall
ManagedResources map[string]*ManagedResource
DataResources map[string]*DataResource
}
// File describes the contents of a single configuration file.
//
// Individual files are not usually used alone, but rather combined together
// with other files (conventionally, those in the same directory) to produce
// a *Module, using NewModule.
//
// At the level of an individual file we represent directly the structural
// elements present in the file, without any attempt to detect conflicting
// declarations. A File object can therefore be used for some basic static
// analysis of individual elements, but must be built into a Module to detect
// duplicate declarations.
type File struct {
CoreVersionConstraints []VersionConstraint
Backends []*Backend
ProviderConfigs []*Provider
ProviderRequirements []*ProviderRequirement
Variables []*Variable
Locals []*Local
Outputs []*Output
ModuleCalls []*ModuleCall
ManagedResources []*ManagedResource
DataResources []*DataResource
}
// NewModule takes a list of primary files and a list of override files and
// produces a *Module by combining the files together.
//
// If there are any conflicting declarations in the given files -- for example,
// if the same variable name is defined twice -- then the resulting module
// will be incomplete and error diagnostics will be returned. Careful static
// analysis of the returned Module is still possible in this case, but the
// module will probably not be semantically valid.
func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) {
var diags hcl.Diagnostics
mod := &Module{
ProviderConfigs: map[string]*Provider{},
ProviderRequirements: map[string][]VersionConstraint{},
Variables: map[string]*Variable{},
Locals: map[string]*Local{},
Outputs: map[string]*Output{},
ModuleCalls: map[string]*ModuleCall{},
ManagedResources: map[string]*ManagedResource{},
DataResources: map[string]*DataResource{},
}
for _, file := range primaryFiles {
fileDiags := mod.appendFile(file)
diags = append(diags, fileDiags...)
}
for _, file := range overrideFiles {
fileDiags := mod.mergeFile(file)
diags = append(diags, fileDiags...)
}
return mod, diags
}
func (m *Module) appendFile(file *File) hcl.Diagnostics {
var diags hcl.Diagnostics
for _, constraint := range file.CoreVersionConstraints {
// If there are any conflicting requirements then we'll catch them
// when we actually check these constraints.
m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint)
}
for _, b := range file.Backends {
if m.Backend != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate backend configuration",
Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange),
Subject: &b.DeclRange,
})
continue
}
m.Backend = b
}
for _, pc := range file.ProviderConfigs {
key := pc.moduleUniqueKey()
if existing, exists := m.ProviderConfigs[key]; exists {
if existing.Alias == "" {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate provider configuration",
Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange),
Subject: &pc.DeclRange,
})
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate provider configuration",
Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange),
Subject: &pc.DeclRange,
})
}
continue
}
m.ProviderConfigs[key] = pc
}
for _, reqd := range file.ProviderRequirements {
m.ProviderRequirements[reqd.Name] = append(m.ProviderRequirements[reqd.Name], reqd.Requirement)
}
for _, v := range file.Variables {
if existing, exists := m.Variables[v.Name]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate variable declaration",
Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange),
Subject: &v.DeclRange,
})
}
m.Variables[v.Name] = v
}
for _, l := range file.Locals {
if existing, exists := m.Locals[l.Name]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate local value definition",
Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange),
Subject: &l.DeclRange,
})
}
m.Locals[l.Name] = l
}
for _, o := range file.Outputs {
if existing, exists := m.Outputs[o.Name]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate output definition",
Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange),
Subject: &o.DeclRange,
})
}
m.Outputs[o.Name] = o
}
for _, mc := range file.ModuleCalls {
if existing, exists := m.ModuleCalls[mc.Name]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate module call",
Detail: fmt.Sprintf("An module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange),
Subject: &mc.DeclRange,
})
}
m.ModuleCalls[mc.Name] = mc
}
for _, r := range file.ManagedResources {
key := r.moduleUniqueKey()
if existing, exists := m.ManagedResources[key]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type),
Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange),
Subject: &r.DeclRange,
})
continue
}
m.ManagedResources[key] = r
}
for _, r := range file.DataResources {
key := r.moduleUniqueKey()
if existing, exists := m.DataResources[key]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type),
Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange),
Subject: &r.DeclRange,
})
continue
}
m.DataResources[key] = r
}
return diags
}
func (m *Module) mergeFile(file *File) hcl.Diagnostics {
var diags hcl.Diagnostics
if len(file.CoreVersionConstraints) != 0 {
// This is a bit of a strange case for overriding since we normally
// would union together across multiple files anyway, but we'll
// allow it and have each override file clobber any existing list.
m.CoreVersionConstraints = nil
for _, constraint := range file.CoreVersionConstraints {
m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint)
}
}
if len(file.Backends) != 0 {
switch len(file.Backends) {
case 1:
m.Backend = file.Backends[0]
default:
// An override file with multiple backends is still invalid, even
// though it can override backends from _other_ files.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate backend configuration",
Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange),
Subject: &file.Backends[1].DeclRange,
})
}
}
for _, pc := range file.ProviderConfigs {
key := pc.moduleUniqueKey()
existing, exists := m.ProviderConfigs[key]
if pc.Alias == "" {
// We allow overriding a non-existing _default_ provider configuration
// because the user model is that an absent provider configuration
// implies an empty provider configuration, which is what the user
// is therefore overriding here.
if exists {
mergeDiags := existing.merge(pc)
diags = append(diags, mergeDiags...)
} else {
m.ProviderConfigs[key] = pc
}
} else {
// For aliased providers, there must be a base configuration to
// override. This allows us to detect and report alias typos
// that might otherwise cause the override to not apply.
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing base provider configuration for override",
Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias),
Subject: &pc.DeclRange,
})
continue
}
mergeDiags := existing.merge(pc)
diags = append(diags, mergeDiags...)
}
}
if len(file.ProviderRequirements) != 0 {
mergeProviderVersionConstraints(m.ProviderRequirements, file.ProviderRequirements)
}
for _, v := range file.Variables {
existing, exists := m.Variables[v.Name]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing base variable declaration to override",
Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name),
Subject: &v.DeclRange,
})
continue
}
mergeDiags := existing.merge(v)
diags = append(diags, mergeDiags...)
}
for _, l := range file.Locals {
existing, exists := m.Locals[l.Name]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing base local value definition to override",
Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name),
Subject: &l.DeclRange,
})
continue
}
mergeDiags := existing.merge(l)
diags = append(diags, mergeDiags...)
}
for _, o := range file.Outputs {
existing, exists := m.Outputs[o.Name]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing base output definition to override",
Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name),
Subject: &o.DeclRange,
})
continue
}
mergeDiags := existing.merge(o)
diags = append(diags, mergeDiags...)
}
for _, mc := range file.ModuleCalls {
existing, exists := m.ModuleCalls[mc.Name]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing module call to override",
Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name),
Subject: &mc.DeclRange,
})
continue
}
mergeDiags := existing.merge(mc)
diags = append(diags, mergeDiags...)
}
for _, r := range file.ManagedResources {
key := r.moduleUniqueKey()
existing, exists := m.ManagedResources[key]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing resource to override",
Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name),
Subject: &r.DeclRange,
})
continue
}
mergeDiags := existing.merge(r)
diags = append(diags, mergeDiags...)
}
for _, r := range file.DataResources {
key := r.moduleUniqueKey()
existing, exists := m.DataResources[key]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing data resource to override",
Detail: fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name),
Subject: &r.DeclRange,
})
continue
}
mergeDiags := existing.merge(r)
diags = append(diags, mergeDiags...)
}
return diags
}

101
configs/module_call.go Normal file
View File

@ -0,0 +1,101 @@
package configs
import (
"github.com/hashicorp/hcl2/gohcl"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
)
// ModuleCall represents a "module" block in a module or file.
type ModuleCall struct {
Name string
SourceAddr string
SourceAddrRange hcl.Range
SourceSet bool
Config hcl.Body
Version VersionConstraint
Count hcl.Expression
ForEach hcl.Expression
DependsOn []hcl.Traversal
DeclRange hcl.Range
}
func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) {
mc := &ModuleCall{
Name: block.Labels[0],
DeclRange: block.DefRange,
}
schema := moduleBlockSchema
if override {
schema = schemaForOverrides(schema)
}
content, remain, diags := block.Body.PartialContent(schema)
mc.Config = remain
if !hclsyntax.ValidIdentifier(mc.Name) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid module instance name",
Detail: badIdentifierDetail,
Subject: &block.LabelRanges[0],
})
}
if attr, exists := content.Attributes["source"]; exists {
valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr)
diags = append(diags, valDiags...)
mc.SourceAddrRange = attr.Expr.Range()
mc.SourceSet = true
}
if attr, exists := content.Attributes["version"]; exists {
var versionDiags hcl.Diagnostics
mc.Version, versionDiags = decodeVersionConstraint(attr)
diags = append(diags, versionDiags...)
}
if attr, exists := content.Attributes["count"]; exists {
mc.Count = attr.Expr
}
if attr, exists := content.Attributes["for_each"]; exists {
mc.ForEach = attr.Expr
}
if attr, exists := content.Attributes["depends_on"]; exists {
deps, depsDiags := decodeDependsOn(attr)
diags = append(diags, depsDiags...)
mc.DependsOn = append(mc.DependsOn, deps...)
}
return mc, diags
}
var moduleBlockSchema = &hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "source",
Required: true,
},
{
Name: "version",
},
{
Name: "count",
},
{
Name: "for_each",
},
{
Name: "depends_on",
},
},
}

213
configs/module_merge.go Normal file
View File

@ -0,0 +1,213 @@
package configs
import (
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
)
// The methods in this file are used by Module.mergeFile to apply overrides
// to our different configuration elements. These methods all follow the
// pattern of mutating the receiver to incorporate settings from the parameter,
// returning error diagnostics if any aspect of the parameter cannot be merged
// into the receiver for some reason.
//
// User expectation is that anything _explicitly_ set in the given object
// should take precedence over the corresponding settings in the receiver,
// but that anything omitted in the given object should be left unchanged.
// In some cases it may be reasonable to do a "deep merge" of certain nested
// features, if it is possible to unambiguously correlate the nested elements
// and their behaviors are orthogonal to each other.
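//
// For example, if an override file re-declares a variable giving only a new
// default value, Variable.merge below replaces the default but leaves the
// original description and type hint untouched.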
func (p *Provider) merge(op *Provider) hcl.Diagnostics {
var diags hcl.Diagnostics
if op.Version.Required != nil {
p.Version = op.Version
}
p.Config = mergeBodies(p.Config, op.Config)
return diags
}
func mergeProviderVersionConstraints(recv map[string][]VersionConstraint, ovrd []*ProviderRequirement) {
// Any provider name that's mentioned in the override gets nilled out in
// our map so that we'll rebuild it below. Any provider not mentioned is
// left unchanged.
for _, reqd := range ovrd {
delete(recv, reqd.Name)
}
for _, reqd := range ovrd {
recv[reqd.Name] = append(recv[reqd.Name], reqd.Requirement)
}
}
func (v *Variable) merge(ov *Variable) hcl.Diagnostics {
var diags hcl.Diagnostics
if ov.DescriptionSet {
v.Description = ov.Description
v.DescriptionSet = ov.DescriptionSet
}
if ov.Default != cty.NilVal {
v.Default = ov.Default
}
if ov.TypeHint != TypeHintNone {
v.TypeHint = ov.TypeHint
}
return diags
}
func (l *Local) merge(ol *Local) hcl.Diagnostics {
var diags hcl.Diagnostics
// Since a local is just a single expression in configuration, the
// override definition entirely replaces the base definition, including
// the source range so that we'll send the user to the right place if
// there is an error.
l.Expr = ol.Expr
l.DeclRange = ol.DeclRange
return diags
}
func (o *Output) merge(oo *Output) hcl.Diagnostics {
var diags hcl.Diagnostics
if oo.Description != "" {
o.Description = oo.Description
}
if oo.Expr != nil {
o.Expr = oo.Expr
}
if oo.SensitiveSet {
o.Sensitive = oo.Sensitive
o.SensitiveSet = oo.SensitiveSet
}
// We don't allow depends_on to be overridden because that is likely to
// cause confusing misbehavior.
if len(oo.DependsOn) != 0 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported override",
Detail: "The depends_on argument may not be overridden.",
Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have
})
}
return diags
}
func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics {
var diags hcl.Diagnostics
if omc.SourceSet {
mc.SourceAddr = omc.SourceAddr
mc.SourceAddrRange = omc.SourceAddrRange
mc.SourceSet = omc.SourceSet
}
if omc.Count != nil {
mc.Count = omc.Count
}
if omc.ForEach != nil {
mc.ForEach = omc.ForEach
}
if len(omc.Version.Required) != 0 {
mc.Version = omc.Version
}
mc.Config = mergeBodies(mc.Config, omc.Config)
// We don't allow depends_on to be overridden because that is likely to
// cause confusing misbehavior.
if len(omc.DependsOn) != 0 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported override",
Detail: "The depends_on argument may not be overridden.",
Subject: omc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have
})
}
return diags
}
func (r *ManagedResource) merge(or *ManagedResource) hcl.Diagnostics {
var diags hcl.Diagnostics
if or.Connection != nil {
r.Connection = or.Connection
}
if or.Count != nil {
r.Count = or.Count
}
if or.CreateBeforeDestroySet {
r.CreateBeforeDestroy = or.CreateBeforeDestroy
r.CreateBeforeDestroySet = or.CreateBeforeDestroySet
}
if or.ForEach != nil {
r.ForEach = or.ForEach
}
if len(or.IgnoreChanges) != 0 {
r.IgnoreChanges = or.IgnoreChanges
}
if or.PreventDestroySet {
r.PreventDestroy = or.PreventDestroy
r.PreventDestroySet = or.PreventDestroySet
}
if or.ProviderConfigRef != nil {
r.ProviderConfigRef = or.ProviderConfigRef
}
if len(or.Provisioners) != 0 {
r.Provisioners = or.Provisioners
}
r.Config = mergeBodies(r.Config, or.Config)
// We don't allow depends_on to be overridden because that is likely to
// cause confusing misbehavior.
if len(or.DependsOn) != 0 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported override",
Detail: "The depends_on argument may not be overridden.",
Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have
})
}
return diags
}
func (r *DataResource) merge(or *DataResource) hcl.Diagnostics {
var diags hcl.Diagnostics
if or.Count != nil {
r.Count = or.Count
}
if or.ForEach != nil {
r.ForEach = or.ForEach
}
if or.ProviderConfigRef != nil {
r.ProviderConfigRef = or.ProviderConfigRef
}
r.Config = mergeBodies(r.Config, or.Config)
// We don't allow depends_on to be overridden because that is likely to
// cause confusing misbehavior.
if len(or.DependsOn) != 0 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported override",
Detail: "The depends_on argument may not be overridden.",
Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have
})
}
return diags
}

View File

@ -0,0 +1,121 @@
package configs
import (
"github.com/hashicorp/hcl2/hcl"
)
func mergeBodies(base, override hcl.Body) hcl.Body {
return mergeBody{
Base: base,
Override: override,
}
}
// mergeBody is a hcl.Body implementation that wraps a pair of other bodies
// and allows attributes and blocks within the override to take precedence
// over those defined in the base body.
//
// This is used to deal with dynamically-processed bodies in Module.mergeFile.
// It uses a shallow-only merging strategy where direct attributes defined
// in Override will override attributes of the same name in Base, while any
// blocks defined in Override will hide all blocks of the same type in Base.
//
// This cannot possibly "do the right thing" in all cases, because we don't
// have enough information about user intent. However, this behavior is intended
// to be reasonable for simple overriding use-cases.
type mergeBody struct {
Base hcl.Body
Override hcl.Body
}
var _ hcl.Body = mergeBody{}
func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
var diags hcl.Diagnostics
oSchema := schemaForOverrides(schema)
baseContent, cDiags := b.Base.Content(schema)
diags = append(diags, cDiags...)
overrideContent, cDiags := b.Override.Content(oSchema)
diags = append(diags, cDiags...)
content := b.prepareContent(baseContent, overrideContent)
return content, diags
}
func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
var diags hcl.Diagnostics
oSchema := schemaForOverrides(schema)
baseContent, baseRemain, cDiags := b.Base.PartialContent(schema)
diags = append(diags, cDiags...)
overrideContent, overrideRemain, cDiags := b.Override.PartialContent(oSchema)
diags = append(diags, cDiags...)
content := b.prepareContent(baseContent, overrideContent)
remain := mergeBodies(baseRemain, overrideRemain)
return content, remain, diags
}
func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent {
content := &hcl.BodyContent{
Attributes: make(hcl.Attributes),
}
// For attributes we just assign from each map in turn and let the override
// map clobber any matching entries from base.
for k, a := range base.Attributes {
content.Attributes[k] = a
}
for k, a := range override.Attributes {
content.Attributes[k] = a
}
// Things are a little more interesting for blocks because they arrive
// as a flat list. Our merging semantics call for us to suppress blocks
// from base if at least one block of the same type appears in override.
// We explicitly do not try to correlate and deeply merge nested blocks,
// since we don't have enough context here to infer user intent.
overriddenBlockTypes := make(map[string]bool)
for _, block := range override.Blocks {
overriddenBlockTypes[block.Type] = true
}
for _, block := range base.Blocks {
if overriddenBlockTypes[block.Type] {
continue
}
content.Blocks = append(content.Blocks, block)
}
for _, block := range override.Blocks {
content.Blocks = append(content.Blocks, block)
}
return content
}
func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
var diags hcl.Diagnostics
ret := make(hcl.Attributes)
baseAttrs, aDiags := b.Base.JustAttributes()
diags = append(diags, aDiags...)
overrideAttrs, aDiags := b.Override.JustAttributes()
diags = append(diags, aDiags...)
for k, a := range baseAttrs {
ret[k] = a
}
for k, a := range overrideAttrs {
ret[k] = a
}
return ret, diags
}
func (b mergeBody) MissingItemRange() hcl.Range {
return b.Base.MissingItemRange()
}
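A minimal sketch of the shallow merge behavior documented above, assuming HCL2's hclparse package for parsing two small bodies; exampleMergeBodies and the attribute names are hypothetical.
package configs

import (
	"fmt"

	"github.com/hashicorp/hcl2/hclparse"
)

func exampleMergeBodies() {
	parser := hclparse.NewParser()
	base, _ := parser.ParseHCL([]byte("ami = \"ami-base\"\ninstance_type = \"t2.micro\"\n"), "base.tf")
	override, _ := parser.ParseHCL([]byte("ami = \"ami-override\"\n"), "override.tf")

	merged := mergeBodies(base.Body, override.Body)
	attrs, _ := merged.JustAttributes()
	for name, attr := range attrs {
		val, _ := attr.Expr.Value(nil)
		fmt.Printf("%s = %s\n", name, val.AsString())
	}
	// Prints (in some order):
	//   ami = ami-override
	//   instance_type = t2.micro
}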

View File

@ -0,0 +1,136 @@
package configs
import (
"testing"
"github.com/hashicorp/hcl2/gohcl"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
)
func TestModuleOverrideVariable(t *testing.T) {
mod, diags := testModuleFromDir("test-fixtures/valid-modules/override-variable")
assertNoDiagnostics(t, diags)
if mod == nil {
t.Fatalf("module is nil")
}
got := mod.Variables
want := map[string]*Variable{
"fully_overridden": {
Name: "fully_overridden",
Description: "b_override description",
DescriptionSet: true,
Default: cty.StringVal("b_override"),
TypeHint: TypeHintString,
DeclRange: hcl.Range{
Filename: "test-fixtures/valid-modules/override-variable/primary.tf",
Start: hcl.Pos{
Line: 1,
Column: 1,
Byte: 0,
},
End: hcl.Pos{
Line: 1,
Column: 28,
Byte: 27,
},
},
},
"partially_overridden": {
Name: "partially_overridden",
Description: "base description",
DescriptionSet: true,
Default: cty.StringVal("b_override partial"),
TypeHint: TypeHintString,
DeclRange: hcl.Range{
Filename: "test-fixtures/valid-modules/override-variable/primary.tf",
Start: hcl.Pos{
Line: 7,
Column: 1,
Byte: 103,
},
End: hcl.Pos{
Line: 7,
Column: 32,
Byte: 134,
},
},
},
}
assertResultDeepEqual(t, got, want)
}
func TestModuleOverrideModule(t *testing.T) {
mod, diags := testModuleFromDir("test-fixtures/valid-modules/override-module")
assertNoDiagnostics(t, diags)
if mod == nil {
t.Fatalf("module is nil")
}
if _, exists := mod.ModuleCalls["example"]; !exists {
t.Fatalf("no module 'example'")
}
if len(mod.ModuleCalls) != 1 {
t.Fatalf("wrong number of module calls in result %d; want 1", len(mod.ModuleCalls))
}
got := mod.ModuleCalls["example"]
want := &ModuleCall{
Name: "example",
SourceAddr: "./example2-a_override",
SourceAddrRange: hcl.Range{
Filename: "test-fixtures/valid-modules/override-module/a_override.tf",
Start: hcl.Pos{
Line: 3,
Column: 12,
Byte: 31,
},
End: hcl.Pos{
Line: 3,
Column: 35,
Byte: 54,
},
},
SourceSet: true,
DeclRange: hcl.Range{
Filename: "test-fixtures/valid-modules/override-module/primary.tf",
Start: hcl.Pos{
Line: 2,
Column: 1,
Byte: 1,
},
End: hcl.Pos{
Line: 2,
Column: 17,
Byte: 17,
},
},
}
// We're going to extract and nil out our hcl.Body here because DeepEqual
// is not a useful way to assert on that.
gotConfig := got.Config
got.Config = nil
assertResultDeepEqual(t, got, want)
type content struct {
Kept *string `hcl:"kept"`
Foo *string `hcl:"foo"`
New *string `hcl:"new"`
Newer *string `hcl:"newer"`
}
var gotArgs content
diags = gohcl.DecodeBody(gotConfig, nil, &gotArgs)
assertNoDiagnostics(t, diags)
wantArgs := content{
Kept: stringPtr("primary kept"),
Foo: stringPtr("a_override foo"),
New: stringPtr("b_override new"),
Newer: stringPtr("b_override newer"),
}
assertResultDeepEqual(t, gotArgs, wantArgs)
}

221
configs/named_values.go Normal file
View File

@ -0,0 +1,221 @@
package configs
import (
"fmt"
"github.com/hashicorp/hcl2/gohcl"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/zclconf/go-cty/cty"
)
// A consistent detail message for all "not a valid identifier" diagnostics.
const badIdentifierDetail = "A name must start with a letter and may contain only letters, digits, underscores, and dashes."
// Variable represents a "variable" block in a module or file.
type Variable struct {
Name string
Description string
Default cty.Value
TypeHint VariableTypeHint
DescriptionSet bool
DeclRange hcl.Range
}
func decodeVariableBlock(block *hcl.Block) (*Variable, hcl.Diagnostics) {
v := &Variable{
Name: block.Labels[0],
DeclRange: block.DefRange,
}
content, diags := block.Body.Content(variableBlockSchema)
if !hclsyntax.ValidIdentifier(v.Name) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid variable name",
Detail: badIdentifierDetail,
Subject: &block.LabelRanges[0],
})
}
// Don't allow declaration of variables that would conflict with the
// reserved attribute and block type names in a "module" block, since
// these won't be usable for child modules.
for _, attr := range moduleBlockSchema.Attributes {
if attr.Name == v.Name {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid variable name",
Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name),
Subject: &block.LabelRanges[0],
})
}
}
if attr, exists := content.Attributes["description"]; exists {
valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description)
diags = append(diags, valDiags...)
v.DescriptionSet = true
}
if attr, exists := content.Attributes["default"]; exists {
val, valDiags := attr.Expr.Value(nil)
diags = append(diags, valDiags...)
v.Default = val
}
if attr, exists := content.Attributes["type"]; exists {
expr, shimDiags := shimTraversalInString(attr.Expr, true)
diags = append(diags, shimDiags...)
switch hcl.ExprAsKeyword(expr) {
case "string":
v.TypeHint = TypeHintString
case "list":
v.TypeHint = TypeHintList
case "map":
v.TypeHint = TypeHintMap
default:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid variable type hint",
Detail: "The type argument requires one of the following keywords: string, list, or map.",
Subject: expr.Range().Ptr(),
})
}
}
return v, diags
}
// Output represents an "output" block in a module or file.
type Output struct {
Name string
Description string
Expr hcl.Expression
DependsOn []hcl.Traversal
Sensitive bool
DescriptionSet bool
SensitiveSet bool
DeclRange hcl.Range
}
func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) {
o := &Output{
Name: block.Labels[0],
DeclRange: block.DefRange,
}
schema := outputBlockSchema
if override {
schema = schemaForOverrides(schema)
}
content, diags := block.Body.Content(schema)
if !hclsyntax.ValidIdentifier(o.Name) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid output name",
Detail: badIdentifierDetail,
Subject: &block.LabelRanges[0],
})
}
if attr, exists := content.Attributes["description"]; exists {
valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description)
diags = append(diags, valDiags...)
o.DescriptionSet = true
}
if attr, exists := content.Attributes["value"]; exists {
o.Expr = attr.Expr
}
if attr, exists := content.Attributes["sensitive"]; exists {
valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive)
diags = append(diags, valDiags...)
o.SensitiveSet = true
}
if attr, exists := content.Attributes["depends_on"]; exists {
deps, depsDiags := decodeDependsOn(attr)
diags = append(diags, depsDiags...)
o.DependsOn = append(o.DependsOn, deps...)
}
return o, diags
}
// Local represents a single entry from a "locals" block in a module or file.
// The "locals" block itself is not represented, because it serves only to
// provide context for us to interpret its contents.
type Local struct {
Name string
Expr hcl.Expression
DeclRange hcl.Range
}
func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) {
attrs, diags := block.Body.JustAttributes()
if len(attrs) == 0 {
return nil, diags
}
locals := make([]*Local, 0, len(attrs))
for name, attr := range attrs {
if !hclsyntax.ValidIdentifier(name) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid local value name",
Detail: badIdentifierDetail,
Subject: &attr.NameRange,
})
}
locals = append(locals, &Local{
Name: name,
Expr: attr.Expr,
DeclRange: attr.Range,
})
}
return locals, diags
}
var variableBlockSchema = &hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "description",
},
{
Name: "default",
},
{
Name: "type",
},
},
}
var outputBlockSchema = &hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "description",
},
{
Name: "value",
Required: true,
},
{
Name: "depends_on",
},
{
Name: "sensitive",
},
},
}

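As a rough illustration of decodeVariableBlock, the in-package sketch below (invented block name and values) parses a variable block that uses the legacy quoted type form; the shim should downgrade it to a keyword, record TypeHintString, and surface the "Quoted keywords are deprecated" warning.

package configs

import (
    "fmt"

    "github.com/hashicorp/hcl2/hcl"
    "github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func exampleDecodeVariable() {
    src := `
variable "image_count" {
  description = "How many images to keep"
  type        = "string" // legacy quoted form
  default     = "3"
}
`
    f, diags := hclsyntax.ParseConfig([]byte(src), "example.tf", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }
    content, _ := f.Body.Content(&hcl.BodySchema{
        Blocks: []hcl.BlockHeaderSchema{{Type: "variable", LabelNames: []string{"name"}}},
    })

    v, vDiags := decodeVariableBlock(content.Blocks[0])
    fmt.Println(v.Name, v.TypeHint == TypeHintString, v.DescriptionSet) // image_count true true
    for _, d := range vDiags {
        fmt.Println(d.Summary) // expect: Quoted keywords are deprecated
    }
}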
85
configs/parser.go Normal file
View File

@ -0,0 +1,85 @@
package configs
import (
"fmt"
"strings"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hclparse"
"github.com/spf13/afero"
)
// Parser is the main interface to read configuration files and other related
// files from disk.
//
// It retains a cache of all files that are loaded so that they can be used
// to create source code snippets in diagnostics, etc.
type Parser struct {
fs afero.Afero
p *hclparse.Parser
}
// NewParser creates and returns a new Parser that reads files from the given
// filesystem. If a nil filesystem is passed then the system's "real" filesystem
// will be used, via afero.OsFs.
func NewParser(fs afero.Fs) *Parser {
if fs == nil {
fs = afero.OsFs{}
}
return &Parser{
fs: afero.Afero{Fs: fs},
p: hclparse.NewParser(),
}
}
// LoadHCLFile is a low-level method that reads the file at the given path,
// parses it, and returns the hcl.Body representing its root. In many cases
// it is better to use one of the other Load*File methods on this type,
// which additionally decode the root body in some way and return a higher-level
// construct.
//
// If the file cannot be read at all -- e.g. because it does not exist -- then
// this method will return a nil body and error diagnostics. In this case
// callers may wish to ignore the provided error diagnostics and produce
// a more context-sensitive error instead.
//
// The file will be parsed using the HCL native syntax unless the filename
// ends with ".json", in which case the HCL JSON syntax will be used.
func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) {
src, err := p.fs.ReadFile(path)
if err != nil {
return nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Failed to read file",
Detail: fmt.Sprintf("The file %q could not be read.", path),
},
}
}
var file *hcl.File
var diags hcl.Diagnostics
switch {
case strings.HasSuffix(path, ".json"):
file, diags = p.p.ParseJSON(src, path)
default:
file, diags = p.p.ParseHCL(src, path)
}
// If the returned file or body is nil, then we'll return a non-nil empty
// body so we'll meet our contract that nil means an error reading the file.
if file == nil || file.Body == nil {
return hcl.EmptyBody(), diags
}
return file.Body, diags
}
// Sources returns a map of the cached source buffers for all files that
// have been loaded through this parser, with source filenames (as requested
// when each file was opened) as the keys.
func (p *Parser) Sources() map[string][]byte {
return p.p.Sources()
}

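A brief usage sketch for NewParser and LoadHCLFile, using an in-memory afero filesystem so nothing touches disk; the file names and contents are invented, and only the exported API above plus afero's WriteFile helper is used.

package configs

import (
    "fmt"

    "github.com/spf13/afero"
)

func exampleLoadHCLFile() {
    fs := afero.NewMemMapFs()
    afero.WriteFile(fs, "main.tf", []byte("foo = \"bar\"\n"), 0644)
    afero.WriteFile(fs, "main.tf.json", []byte(`{"foo": "bar"}`), 0644)

    p := NewParser(fs)

    // A ".json" suffix selects the HCL JSON syntax; anything else uses native syntax.
    for _, name := range []string{"main.tf", "main.tf.json"} {
        body, diags := p.LoadHCLFile(name)
        fmt.Println(name, body != nil, diags.HasErrors()) // expect: <name> true false
    }
}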
247
configs/parser_config.go Normal file
View File

@ -0,0 +1,247 @@
package configs
import (
"github.com/hashicorp/hcl2/hcl"
)
// LoadConfigFile reads the file at the given path and parses it as a config
// file.
//
// If the file cannot be read -- for example, if it does not exist -- then
// a nil *File will be returned along with error diagnostics. Callers may wish
// to disregard the returned diagnostics in this case and instead generate
// their own error message(s) with additional context.
//
// If the returned diagnostics has errors when a non-nil *File is returned
// then the file may be incomplete but should be valid enough for careful
// static analysis.
//
// This method wraps LoadHCLFile, and so it inherits the syntax selection
// behaviors documented for that method.
func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) {
return p.loadConfigFile(path, false)
}
// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes
// certain required attribute constraints in order to interpret the given
// file as an overrides file.
func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) {
return p.loadConfigFile(path, true)
}
func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) {
body, diags := p.LoadHCLFile(path)
if body == nil {
return nil, diags
}
file := &File{}
var reqDiags hcl.Diagnostics
file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body)
diags = append(diags, reqDiags...)
content, contentDiags := body.Content(configFileSchema)
diags = append(diags, contentDiags...)
for _, block := range content.Blocks {
switch block.Type {
case "terraform":
content, contentDiags := block.Body.Content(terraformBlockSchema)
diags = append(diags, contentDiags...)
// We ignore the "terraform_version" attribute here because
// sniffCoreVersionRequirements already dealt with that above.
for _, innerBlock := range content.Blocks {
switch innerBlock.Type {
case "backend":
backendCfg, cfgDiags := decodeBackendBlock(innerBlock)
diags = append(diags, cfgDiags...)
if backendCfg != nil {
file.Backends = append(file.Backends, backendCfg)
}
case "required_providers":
reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock)
diags = append(diags, reqsDiags...)
file.ProviderRequirements = append(file.ProviderRequirements, reqs...)
default:
// Should never happen because the above cases should be exhaustive
// for all block type names in our schema.
continue
}
}
case "provider":
cfg, cfgDiags := decodeProviderBlock(block)
diags = append(diags, cfgDiags...)
if cfg != nil {
file.ProviderConfigs = append(file.ProviderConfigs, cfg)
}
case "variable":
cfg, cfgDiags := decodeVariableBlock(block)
diags = append(diags, cfgDiags...)
if cfg != nil {
file.Variables = append(file.Variables, cfg)
}
case "locals":
defs, defsDiags := decodeLocalsBlock(block)
diags = append(diags, defsDiags...)
file.Locals = append(file.Locals, defs...)
case "output":
cfg, cfgDiags := decodeOutputBlock(block, override)
diags = append(diags, cfgDiags...)
if cfg != nil {
file.Outputs = append(file.Outputs, cfg)
}
case "module":
cfg, cfgDiags := decodeModuleBlock(block, override)
diags = append(diags, cfgDiags...)
if cfg != nil {
file.ModuleCalls = append(file.ModuleCalls, cfg)
}
case "resource":
cfg, cfgDiags := decodeResourceBlock(block)
diags = append(diags, cfgDiags...)
if cfg != nil {
file.ManagedResources = append(file.ManagedResources, cfg)
}
case "data":
cfg, cfgDiags := decodeDataBlock(block)
diags = append(diags, cfgDiags...)
if cfg != nil {
file.DataResources = append(file.DataResources, cfg)
}
default:
// Should never happen because the above cases should be exhaustive
// for all block type names in our schema.
continue
}
}
return file, diags
}
// sniffCoreVersionRequirements does minimal parsing of the given body for
// "terraform" blocks with "required_version" attributes, returning the
// requirements found.
//
// This is intended to maximize the chance that we'll be able to read the
// requirements (syntax errors notwithstanding) even if the config file contains
// constructs that might've been added in future Terraform versions.
//
// This is a "best effort" sort of method which will return constraints it is
// able to find, but may return no constraints at all if the given body is
// so invalid that it cannot be decoded at all.
func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) {
rootContent, _, diags := body.PartialContent(configFileVersionSniffRootSchema)
var constraints []VersionConstraint
for _, block := range rootContent.Blocks {
content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema)
diags = append(diags, blockDiags...)
attr, exists := content.Attributes["required_version"]
if !exists {
continue
}
constraint, constraintDiags := decodeVersionConstraint(attr)
diags = append(diags, constraintDiags...)
if !constraintDiags.HasErrors() {
constraints = append(constraints, constraint)
}
}
return constraints, diags
}
// configFileSchema is the schema for the top-level of a config file. We use
// the low-level HCL API for this level so we can easily deal with each
// block type separately with its own decoding logic.
var configFileSchema = &hcl.BodySchema{
Blocks: []hcl.BlockHeaderSchema{
{
Type: "terraform",
},
{
Type: "provider",
LabelNames: []string{"name"},
},
{
Type: "variable",
LabelNames: []string{"name"},
},
{
Type: "locals",
},
{
Type: "output",
LabelNames: []string{"name"},
},
{
Type: "module",
LabelNames: []string{"name"},
},
{
Type: "resource",
LabelNames: []string{"type", "name"},
},
{
Type: "data",
LabelNames: []string{"type", "name"},
},
},
}
// terraformBlockSchema is the schema for a top-level "terraform" block in
// a configuration file.
var terraformBlockSchema = &hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "required_version",
},
},
Blocks: []hcl.BlockHeaderSchema{
{
Type: "backend",
LabelNames: []string{"type"},
},
{
Type: "required_providers",
},
},
}
// configFileVersionSniffRootSchema is a schema for sniffCoreVersionRequirements
var configFileVersionSniffRootSchema = &hcl.BodySchema{
Blocks: []hcl.BlockHeaderSchema{
{
Type: "terraform",
},
},
}
// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements
var configFileVersionSniffBlockSchema = &hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "required_version",
},
},
}

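To make the "best effort" sniffing behavior concrete, the hypothetical in-package sketch below feeds sniffCoreVersionRequirements a terraform block that also contains an unrecognized nested block; because both levels use PartialContent, the required_version constraint should still be recovered. The block and attribute names are invented.

package configs

import (
    "fmt"

    "github.com/hashicorp/hcl2/hcl"
    "github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func exampleSniffCoreVersion() {
    src := `
terraform {
  required_version = ">= 0.12.0"

  some_future_feature {
    enabled = true
  }
}
`
    f, diags := hclsyntax.ParseConfig([]byte(src), "versions.tf", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }

    constraints, sniffDiags := sniffCoreVersionRequirements(f.Body)
    fmt.Println(len(constraints), sniffDiags.HasErrors()) // expect: 1 false
}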
View File

@ -0,0 +1,131 @@
package configs
import (
"fmt"
"path/filepath"
"strings"
"github.com/hashicorp/hcl2/hcl"
)
// LoadConfigDir reads the .tf and .tf.json files in the given directory
// as config files (using LoadConfigFile) and then combines these files into
// a single Module.
//
// If this method returns nil, that indicates that the given directory does not
// exist at all or could not be opened for some reason. Callers may wish to
// detect this case and ignore the returned diagnostics so that they can
// produce a more context-aware error message in that case.
//
// If this method returns a non-nil module while error diagnostics are returned
// then the module may be incomplete but can be used carefully for static
// analysis.
//
// This method does not consider a directory with no files to be an error, and
// will simply return an empty module in that case. Callers should first call
// Parser.IsConfigDir if they wish to recognize that situation.
//
// .tf files are parsed using the HCL native syntax while .tf.json files are
// parsed using the HCL JSON syntax.
func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) {
primaryPaths, overridePaths, diags := p.dirFiles(path)
if diags.HasErrors() {
return nil, diags
}
primary, fDiags := p.loadFiles(primaryPaths, false)
diags = append(diags, fDiags...)
override, fDiags := p.loadFiles(overridePaths, true)
diags = append(diags, fDiags...)
mod, modDiags := NewModule(primary, override)
diags = append(diags, modDiags...)
return mod, diags
}
// IsConfigDir determines whether the given path refers to a directory that
// exists and contains at least one Terraform config file (with a .tf or
// .tf.json extension).
func (p *Parser) IsConfigDir(path string) bool {
primaryPaths, overridePaths, _ := p.dirFiles(path)
return (len(primaryPaths) + len(overridePaths)) > 0
}
func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) {
var files []*File
var diags hcl.Diagnostics
for _, path := range paths {
var f *File
var fDiags hcl.Diagnostics
if override {
f, fDiags = p.LoadConfigFileOverride(path)
} else {
f, fDiags = p.LoadConfigFile(path)
}
diags = append(diags, fDiags...)
if f != nil {
files = append(files, f)
}
}
return files, diags
}
func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) {
infos, err := p.fs.ReadDir(dir)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Failed to read module directory",
Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir),
})
return
}
for _, info := range infos {
if info.IsDir() {
// We only care about files
continue
}
name := info.Name()
ext := fileExt(name)
if ext == "" || IsIgnoredFile(name) {
continue
}
baseName := name[:len(name)-len(ext)] // strip extension
isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override")
fullPath := filepath.Join(dir, name)
if isOverride {
override = append(override, fullPath)
} else {
primary = append(primary, fullPath)
}
}
return
}
// fileExt returns the Terraform configuration extension of the given
// path, or a blank string if it is not a recognized extension.
func fileExt(path string) string {
if strings.HasSuffix(path, ".tf") {
return ".tf"
} else if strings.HasSuffix(path, ".tf.json") {
return ".tf.json"
} else {
return ""
}
}
// IsIgnoredFile returns true if the given filename (which must not have a
// directory path ahead of it) should be ignored as e.g. an editor swap file.
func IsIgnoredFile(name string) bool {
return strings.HasPrefix(name, ".") || // Unix-like hidden files
strings.HasSuffix(name, "~") || // vim
strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
}

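The sketch below (hypothetical, in-package) exercises the file classification rules implemented by dirFiles, fileExt, and IsIgnoredFile: override.tf and *_override.tf land in the override set, while hidden files, editor backups, and non-.tf files are skipped. The file names are invented.

package configs

import (
    "fmt"

    "github.com/spf13/afero"
)

func exampleDirFiles() {
    fs := afero.NewMemMapFs()
    fs.MkdirAll("mod", 0755)
    for _, name := range []string{
        "main.tf",          // primary
        "outputs.tf.json",  // primary (JSON syntax)
        "override.tf",      // override: base name is exactly "override"
        "prod_override.tf", // override: base name ends in "_override"
        ".hidden.tf",       // ignored: hidden file
        "main.tf~",         // ignored: editor backup
        "README.md",        // ignored: not .tf or .tf.json
    } {
        afero.WriteFile(fs, "mod/"+name, []byte(""), 0644)
    }

    p := NewParser(fs)
    primary, override, _ := p.dirFiles("mod")
    fmt.Println(primary)  // expect: [mod/main.tf mod/outputs.tf.json]
    fmt.Println(override) // expect: [mod/override.tf mod/prod_override.tf]
}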
View File

@ -0,0 +1,136 @@
package configs
import (
"fmt"
"io/ioutil"
"path/filepath"
"testing"
)
// TestParserLoadConfigDirSuccess is a simple test that just verifies that
// a number of test configuration directories (in test-fixtures/valid-modules)
// can be parsed without raising any diagnostics.
//
// It also re-tests the individual files in test-fixtures/valid-files as if
// they were single-file modules, to ensure that they can be bundled into
// modules correctly.
//
// This test does not verify that reading these modules produces the correct
// module element contents. More detailed assertions may be made on some subset
// of these configuration files in other tests.
func TestParserLoadConfigDirSuccess(t *testing.T) {
dirs, err := ioutil.ReadDir("test-fixtures/valid-modules")
if err != nil {
t.Fatal(err)
}
for _, info := range dirs {
name := info.Name()
t.Run(name, func(t *testing.T) {
parser := NewParser(nil)
path := filepath.Join("test-fixtures/valid-modules", name)
_, diags := parser.LoadConfigDir(path)
if len(diags) != 0 {
t.Errorf("unexpected diagnostics")
for _, diag := range diags {
t.Logf("- %s", diag)
}
}
})
}
// The individual files in test-fixtures/valid-files should also work
// when loaded as modules.
files, err := ioutil.ReadDir("test-fixtures/valid-files")
if err != nil {
t.Fatal(err)
}
for _, info := range files {
name := info.Name()
t.Run(fmt.Sprintf("%s as module", name), func(t *testing.T) {
src, err := ioutil.ReadFile(filepath.Join("test-fixtures/valid-files", name))
if err != nil {
t.Fatal(err)
}
parser := testParser(map[string]string{
"mod/" + name: string(src),
})
_, diags := parser.LoadConfigDir("mod")
if diags.HasErrors() {
t.Errorf("unexpected error diagnostics")
for _, diag := range diags {
t.Logf("- %s", diag)
}
}
})
}
}
// TestParserLoadConfigDirFailure is a simple test that just verifies that
// a number of test configuration directories (in test-fixtures/invalid-modules)
// produce diagnostics when parsed.
//
// It also re-tests the individual files in test-fixtures/invalid-files as if
// they were single-file modules, to ensure that their errors are still
// detected when loading as part of a module.
//
// This test does not verify that reading these modules produces any
// diagnostics in particular. More detailed assertions may be made on some subset
// of these configuration files in other tests.
func TestParserLoadConfigDirFailure(t *testing.T) {
dirs, err := ioutil.ReadDir("test-fixtures/invalid-modules")
if err != nil {
t.Fatal(err)
}
for _, info := range dirs {
name := info.Name()
t.Run(name, func(t *testing.T) {
parser := NewParser(nil)
path := filepath.Join("test-fixtures/invalid-modules", name)
_, diags := parser.LoadConfigDir(path)
if !diags.HasErrors() {
t.Errorf("no errors; want at least one")
for _, diag := range diags {
t.Logf("- %s", diag)
}
}
})
}
// The individual files in test-fixtures/invalid-files should also produce
// errors when loaded as modules.
files, err := ioutil.ReadDir("test-fixtures/invalid-files")
if err != nil {
t.Fatal(err)
}
for _, info := range files {
name := info.Name()
t.Run(fmt.Sprintf("%s as module", name), func(t *testing.T) {
src, err := ioutil.ReadFile(filepath.Join("test-fixtures/invalid-files", name))
if err != nil {
t.Fatal(err)
}
parser := testParser(map[string]string{
"mod/" + name: string(src),
})
_, diags := parser.LoadConfigDir("mod")
if !diags.HasErrors() {
t.Errorf("no errors; want at least one")
for _, diag := range diags {
t.Logf("- %s", diag)
}
}
})
}
}

View File

@ -0,0 +1,181 @@
package configs
import (
"io/ioutil"
"path/filepath"
"testing"
"github.com/hashicorp/hcl2/hcl"
)
// TestParserLoadConfigFileSuccess is a simple test that just verifies that
// a number of test configuration files (in test-fixtures/valid-files) can
// be parsed without raising any diagnostics.
//
// This test does not verify that reading these files produces the correct
// file element contents. More detailed assertions may be made on some subset
// of these configuration files in other tests.
func TestParserLoadConfigFileSuccess(t *testing.T) {
files, err := ioutil.ReadDir("test-fixtures/valid-files")
if err != nil {
t.Fatal(err)
}
for _, info := range files {
name := info.Name()
t.Run(name, func(t *testing.T) {
src, err := ioutil.ReadFile(filepath.Join("test-fixtures/valid-files", name))
if err != nil {
t.Fatal(err)
}
parser := testParser(map[string]string{
name: string(src),
})
_, diags := parser.LoadConfigFile(name)
if diags.HasErrors() {
t.Errorf("unexpected error diagnostics")
for _, diag := range diags {
t.Logf("- %s", diag)
}
}
})
}
}
// TestParserLoadConfigFileFailure is a simple test that just verifies that
// a number of test configuration files (in test-fixtures/invalid-files)
// produce errors as expected.
//
// This test does not verify specific error messages, so more detailed
// assertions should be made on some subset of these configuration files in
// other tests.
func TestParserLoadConfigFileFailure(t *testing.T) {
files, err := ioutil.ReadDir("test-fixtures/invalid-files")
if err != nil {
t.Fatal(err)
}
for _, info := range files {
name := info.Name()
t.Run(name, func(t *testing.T) {
src, err := ioutil.ReadFile(filepath.Join("test-fixtures/invalid-files", name))
if err != nil {
t.Fatal(err)
}
parser := testParser(map[string]string{
name: string(src),
})
_, diags := parser.LoadConfigFile(name)
if !diags.HasErrors() {
t.Errorf("LoadConfigFile succeeded; want errors")
}
for _, diag := range diags {
t.Logf("- %s", diag)
}
})
}
}
// This test uses a subset of the same fixture files as
// TestParserLoadConfigFileFailure, but additionally verifies that each
// file produces the expected diagnostic summary.
func TestParserLoadConfigFileFailureMessages(t *testing.T) {
tests := []struct {
Filename string
WantSeverity hcl.DiagnosticSeverity
WantDiag string
}{
{
"invalid-files/data-resource-lifecycle.tf",
hcl.DiagError,
"Unsupported lifecycle block",
},
{
"invalid-files/variable-type-unknown.tf",
hcl.DiagError,
"Invalid variable type hint",
},
{
"valid-files/variable-type-quoted.tf",
hcl.DiagWarning,
"Quoted keywords are deprecated",
},
{
"invalid-files/unexpected-attr.tf",
hcl.DiagError,
"Unsupported attribute",
},
{
"invalid-files/unexpected-block.tf",
hcl.DiagError,
"Unsupported block type",
},
{
"invalid-files/resource-lifecycle-badbool.tf",
hcl.DiagError,
"Unsuitable value type",
},
{
"valid-files/resources-dependson-quoted.tf",
hcl.DiagWarning,
"Quoted references are deprecated",
},
{
"valid-files/resources-ignorechanges-quoted.tf",
hcl.DiagWarning,
"Quoted references are deprecated",
},
{
"valid-files/resources-ignorechanges-all-legacy.tf",
hcl.DiagWarning,
"Deprecated ignore_changes wildcard",
},
{
"valid-files/resources-ignorechanges-all-legacy.tf.json",
hcl.DiagWarning,
"Deprecated ignore_changes wildcard",
},
{
"valid-files/resources-provisioner-when-quoted.tf",
hcl.DiagWarning,
"Quoted keywords are deprecated",
},
{
"valid-files/resources-provisioner-onfailure-quoted.tf",
hcl.DiagWarning,
"Quoted keywords are deprecated",
},
}
for _, test := range tests {
t.Run(test.Filename, func(t *testing.T) {
src, err := ioutil.ReadFile(filepath.Join("test-fixtures", test.Filename))
if err != nil {
t.Fatal(err)
}
parser := testParser(map[string]string{
test.Filename: string(src),
})
_, diags := parser.LoadConfigFile(test.Filename)
if len(diags) != 1 {
t.Errorf("Wrong number of diagnostics %d; want 1", len(diags))
for _, diag := range diags {
t.Logf("- %s", diag)
}
return
}
if diags[0].Severity != test.WantSeverity {
t.Errorf("Wrong diagnostic severity %#v; want %#v", diags[0].Severity, test.WantSeverity)
}
if diags[0].Summary != test.WantDiag {
t.Errorf("Wrong diagnostic summary\ngot: %s\nwant: %s", diags[0].Summary, test.WantDiag)
}
})
}
}

99
configs/parser_test.go Normal file
View File

@ -0,0 +1,99 @@
package configs
import (
"os"
"path"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/hashicorp/hcl2/hcl"
"github.com/spf13/afero"
)
// testParser returns a parser that reads files from the given map, which
// is from paths to file contents.
//
// Since this function uses only in-memory objects, it should never fail.
// If any errors are encountered in practice, this function will panic.
func testParser(files map[string]string) *Parser {
fs := afero.Afero{Fs: afero.NewMemMapFs()}
for filePath, contents := range files {
dirPath := path.Dir(filePath)
err := fs.MkdirAll(dirPath, os.ModePerm)
if err != nil {
panic(err)
}
err = fs.WriteFile(filePath, []byte(contents), os.ModePerm)
if err != nil {
panic(err)
}
}
return NewParser(fs)
}
// testModuleFromFile reads a single file, wraps it in a module, and returns
// it. This is a helper for use in unit tests.
func testModuleFromFile(filename string) (*Module, hcl.Diagnostics) {
parser := NewParser(nil)
f, diags := parser.LoadConfigFile(filename)
mod, modDiags := NewModule([]*File{f}, nil)
diags = append(diags, modDiags...)
return mod, diags
}
// testModuleFromDir reads configuration from the given directory path as
// a module and returns it. This is a helper for use in unit tests.
func testModuleFromDir(path string) (*Module, hcl.Diagnostics) {
parser := NewParser(nil)
return parser.LoadConfigDir(path)
}
func assertNoDiagnostics(t *testing.T, diags hcl.Diagnostics) bool {
t.Helper()
return assertDiagnosticCount(t, diags, 0)
}
func assertDiagnosticCount(t *testing.T, diags hcl.Diagnostics, want int) bool {
t.Helper()
if len(diags) != want {
t.Errorf("wrong number of diagnostics %d; want %d", len(diags), want)
for _, diag := range diags {
t.Logf("- %s", diag)
}
return true
}
return false
}
func assertDiagnosticSummary(t *testing.T, diags hcl.Diagnostics, want string) bool {
t.Helper()
for _, diag := range diags {
if diag.Summary == want {
return false
}
}
t.Errorf("missing diagnostic summary %q", want)
for _, diag := range diags {
t.Logf("- %s", diag)
}
return true
}
func assertResultDeepEqual(t *testing.T, got, want interface{}) bool {
t.Helper()
if !reflect.DeepEqual(got, want) {
t.Errorf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(want))
return true
}
return false
}
func stringPtr(s string) *string {
return &s
}

43
configs/parser_values.go Normal file
View File

@ -0,0 +1,43 @@
package configs
import (
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
)
// LoadValuesFile reads the file at the given path and parses it as a "values
// file", which is an HCL config file whose top-level attributes are treated
// as arbitrary key/value pairs.
//
// If the file cannot be read -- for example, if it does not exist -- then
// a nil map will be returned along with error diagnostics. Callers may wish
// to disregard the returned diagnostics in this case and instead generate
// their own error message(s) with additional context.
//
// If the returned diagnostics has errors when a non-nil map is returned
// then the map may be incomplete but should be valid enough for careful
// static analysis.
//
// This method wraps LoadHCLFile, and so it inherits the syntax selection
// behaviors documented for that method.
func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) {
body, diags := p.LoadHCLFile(path)
if body == nil {
return nil, diags
}
vals := make(map[string]cty.Value)
attrs, attrDiags := body.JustAttributes()
diags = append(diags, attrDiags...)
if attrs == nil {
return vals, diags
}
for name, attr := range attrs {
val, valDiags := attr.Expr.Value(nil)
diags = append(diags, valDiags...)
vals[name] = val
}
return vals, diags
}

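A short usage sketch for LoadValuesFile with an in-memory filesystem; the variable names and values are invented, and the comparison mirrors the cty.Value assertions used in the tests below.

package configs

import (
    "fmt"

    "github.com/spf13/afero"
    "github.com/zclconf/go-cty/cty"
)

func exampleLoadValuesFile() {
    fs := afero.NewMemMapFs()
    afero.WriteFile(fs, "terraform.tfvars", []byte("instance_count = 2\nregion = \"us-west-2\"\n"), 0644)

    p := NewParser(fs)
    vals, diags := p.LoadValuesFile("terraform.tfvars")
    if diags.HasErrors() {
        panic(diags.Error())
    }

    fmt.Println(vals["region"].AsString())                             // us-west-2
    fmt.Println(vals["instance_count"].RawEquals(cty.NumberIntVal(2))) // true
}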
View File

@ -0,0 +1,113 @@
package configs
import (
"testing"
"github.com/zclconf/go-cty/cty"
)
func TestParserLoadValuesFile(t *testing.T) {
tests := map[string]struct {
Source string
Want map[string]cty.Value
DiagCount int
}{
"empty.tfvars": {
"",
map[string]cty.Value{},
0,
},
"empty.json": {
"{}",
map[string]cty.Value{},
0,
},
"zerolen.json": {
"",
map[string]cty.Value{},
2, // syntax error and missing root object
},
"one-number.tfvars": {
"foo = 1\n",
map[string]cty.Value{
"foo": cty.NumberIntVal(1),
},
0,
},
"one-number.tfvars.json": {
`{"foo": 1}`,
map[string]cty.Value{
"foo": cty.NumberIntVal(1),
},
0,
},
"two-bools.tfvars": {
"foo = true\nbar = false\n",
map[string]cty.Value{
"foo": cty.True,
"bar": cty.False,
},
0,
},
"two-bools.tfvars.json": {
`{"foo": true, "bar": false}`,
map[string]cty.Value{
"foo": cty.True,
"bar": cty.False,
},
0,
},
"invalid-syntax.tfvars": {
"foo bar baz\n",
map[string]cty.Value{},
1, // attribute or block definition required
},
"block.tfvars": {
"foo = true\ninvalid {\n}\n",
map[string]cty.Value{
"foo": cty.True,
},
1, // blocks are not allowed
},
"variables.tfvars": {
"baz = true\nfoo = var.baz\n",
map[string]cty.Value{
"baz": cty.True,
"foo": cty.DynamicVal,
},
1, // variables cannot be referenced here
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
p := testParser(map[string]string{
name: test.Source,
})
got, diags := p.LoadValuesFile(name)
if len(diags) != test.DiagCount {
t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
for _, diag := range diags {
t.Logf("- %s", diag)
}
}
if len(got) != len(test.Want) {
t.Errorf("wrong number of result keys %d; want %d", len(got), len(test.Want))
}
for name, gotVal := range got {
wantVal := test.Want[name]
if wantVal == cty.NilVal {
t.Errorf("unexpected result key %q", name)
continue
}
if !gotVal.RawEquals(wantVal) {
t.Errorf("wrong value for %q\ngot: %#v\nwant: %#v", name, gotVal, wantVal)
continue
}
}
})
}
}

100
configs/provider.go Normal file
View File

@ -0,0 +1,100 @@
package configs
import (
"fmt"
"github.com/hashicorp/hcl2/gohcl"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
)
// Provider represents a "provider" block in a module or file. A provider
// block is a provider configuration, and there can be zero or more
// configurations for each actual provider.
type Provider struct {
Name string
NameRange hcl.Range
Alias string
AliasRange *hcl.Range // nil if no alias set
Version VersionConstraint
Config hcl.Body
DeclRange hcl.Range
}
func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) {
content, config, diags := block.Body.PartialContent(providerBlockSchema)
provider := &Provider{
Name: block.Labels[0],
NameRange: block.LabelRanges[0],
Config: config,
DeclRange: block.DefRange,
}
if attr, exists := content.Attributes["alias"]; exists {
valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias)
diags = append(diags, valDiags...)
provider.AliasRange = attr.Expr.Range().Ptr()
if !hclsyntax.ValidIdentifier(provider.Alias) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid provider configuration alias",
Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail),
})
}
}
if attr, exists := content.Attributes["version"]; exists {
var versionDiags hcl.Diagnostics
provider.Version, versionDiags = decodeVersionConstraint(attr)
diags = append(diags, versionDiags...)
}
return provider, diags
}
func (p *Provider) moduleUniqueKey() string {
if p.Alias != "" {
return fmt.Sprintf("%s.%s", p.Name, p.Alias)
}
return p.Name
}
// ProviderRequirement represents a declaration of a dependency on a particular
// provider version without actually configuring that provider. This is used in
// child modules that expect a provider to be passed in from their parent.
type ProviderRequirement struct {
Name string
Requirement VersionConstraint
}
func decodeRequiredProvidersBlock(block *hcl.Block) ([]*ProviderRequirement, hcl.Diagnostics) {
attrs, diags := block.Body.JustAttributes()
var reqs []*ProviderRequirement
for name, attr := range attrs {
req, reqDiags := decodeVersionConstraint(attr)
diags = append(diags, reqDiags...)
if !diags.HasErrors() {
reqs = append(reqs, &ProviderRequirement{
Name: name,
Requirement: req,
})
}
}
return reqs, diags
}
var providerBlockSchema = &hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "alias",
},
{
Name: "version",
},
},
}

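As a usage illustration, the hypothetical in-package sketch below decodes an aliased provider block; the provider name, alias, and version values are invented, and the version string assumes decodeVersionConstraint accepts the usual constraint syntax seen in the fixtures.

package configs

import (
    "fmt"

    "github.com/hashicorp/hcl2/hcl"
    "github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func exampleDecodeProvider() {
    src := `
provider "aws" {
  alias   = "west"
  version = ">= 1.0.2"
  region  = "us-west-2" // stays in the Config remainder body
}
`
    f, _ := hclsyntax.ParseConfig([]byte(src), "providers.tf", hcl.Pos{Line: 1, Column: 1})
    content, _ := f.Body.Content(&hcl.BodySchema{
        Blocks: []hcl.BlockHeaderSchema{{Type: "provider", LabelNames: []string{"name"}}},
    })

    pc, diags := decodeProviderBlock(content.Blocks[0])
    fmt.Println(pc.moduleUniqueKey(), diags.HasErrors()) // expect: aws.west false
}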
173
configs/provisioner.go Normal file
View File

@ -0,0 +1,173 @@
package configs
import (
"fmt"
"github.com/hashicorp/hcl2/gohcl"
"github.com/hashicorp/hcl2/hcl"
)
// Provisioner represents a "provisioner" block when used within a
// "resource" block in a module or file.
type Provisioner struct {
Type string
Config hcl.Body
Connection *Connection
When ProvisionerWhen
OnFailure ProvisionerOnFailure
DeclRange hcl.Range
TypeRange hcl.Range
}
func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) {
pv := &Provisioner{
Type: block.Labels[0],
TypeRange: block.LabelRanges[0],
DeclRange: block.DefRange,
When: ProvisionerWhenCreate,
OnFailure: ProvisionerOnFailureFail,
}
content, config, diags := block.Body.PartialContent(provisionerBlockSchema)
pv.Config = config
if attr, exists := content.Attributes["when"]; exists {
expr, shimDiags := shimTraversalInString(attr.Expr, true)
diags = append(diags, shimDiags...)
switch hcl.ExprAsKeyword(expr) {
case "create":
pv.When = ProvisionerWhenCreate
case "destroy":
pv.When = ProvisionerWhenDestroy
default:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid \"when\" keyword",
Detail: "The \"when\" argument requires one of the following keywords: create or destroy.",
Subject: expr.Range().Ptr(),
})
}
}
if attr, exists := content.Attributes["on_failure"]; exists {
expr, shimDiags := shimTraversalInString(attr.Expr, true)
diags = append(diags, shimDiags...)
switch hcl.ExprAsKeyword(expr) {
case "continue":
pv.OnFailure = ProvisionerOnFailureContinue
case "fail":
pv.OnFailure = ProvisionerOnFailureFail
default:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid \"on_failure\" keyword",
Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.",
Subject: attr.Expr.Range().Ptr(),
})
}
}
var seenConnection *hcl.Block
for _, block := range content.Blocks {
switch block.Type {
case "connection":
if seenConnection != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate connection block",
Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange),
Subject: &block.DefRange,
})
continue
}
seenConnection = block
conn, connDiags := decodeConnectionBlock(block)
diags = append(diags, connDiags...)
pv.Connection = conn
default:
// Should never happen because there are no other block types
// declared in our schema.
}
}
return pv, diags
}
// Connection represents a "connection" block when used within either a
// "resource" or "provisioner" block in a module or file.
type Connection struct {
Type string
Config hcl.Body
DeclRange hcl.Range
TypeRange *hcl.Range // nil if type is not set
}
func decodeConnectionBlock(block *hcl.Block) (*Connection, hcl.Diagnostics) {
content, config, diags := block.Body.PartialContent(&hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "type",
},
},
})
conn := &Connection{
Type: "ssh",
Config: config,
DeclRange: block.DefRange,
}
if attr, exists := content.Attributes["type"]; exists {
valDiags := gohcl.DecodeExpression(attr.Expr, nil, &conn.Type)
diags = append(diags, valDiags...)
conn.TypeRange = attr.Expr.Range().Ptr()
}
return conn, diags
}
// ProvisionerWhen is an enum for valid values for when to run provisioners.
type ProvisionerWhen int
//go:generate stringer -type ProvisionerWhen
const (
ProvisionerWhenInvalid ProvisionerWhen = iota
ProvisionerWhenCreate
ProvisionerWhenDestroy
)
// ProvisionerOnFailure is an enum for valid values for on_failure options
// for provisioners.
type ProvisionerOnFailure int
//go:generate stringer -type ProvisionerOnFailure
const (
ProvisionerOnFailureInvalid ProvisionerOnFailure = iota
ProvisionerOnFailureContinue
ProvisionerOnFailureFail
)
var provisionerBlockSchema = &hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "when",
},
{
Name: "on_failure",
},
},
Blocks: []hcl.BlockHeaderSchema{
{
Type: "connection",
},
},
}

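The following in-package sketch (invented example values) shows decodeProvisionerBlock accepting the bare keyword form of "when" while shimming the legacy quoted form of "on_failure", which should surface the "Quoted keywords are deprecated" warning.

package configs

import (
    "fmt"

    "github.com/hashicorp/hcl2/hcl"
    "github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func exampleDecodeProvisioner() {
    src := `
provisioner "local-exec" {
  command    = "echo hello"
  when       = destroy
  on_failure = "continue" // legacy quoted form
}
`
    f, _ := hclsyntax.ParseConfig([]byte(src), "example.tf", hcl.Pos{Line: 1, Column: 1})
    content, _ := f.Body.Content(&hcl.BodySchema{
        Blocks: []hcl.BlockHeaderSchema{{Type: "provisioner", LabelNames: []string{"type"}}},
    })

    pv, diags := decodeProvisionerBlock(content.Blocks[0])
    fmt.Println(pv.When == ProvisionerWhenDestroy)            // true
    fmt.Println(pv.OnFailure == ProvisionerOnFailureContinue) // true
    for _, d := range diags {
        fmt.Println(d.Summary) // expect: Quoted keywords are deprecated
    }
}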
View File

@ -0,0 +1,16 @@
// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT.
package configs
import "strconv"
const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail"
var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79}
func (i ProvisionerOnFailure) String() string {
if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) {
return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]]
}

View File

@ -0,0 +1,16 @@
// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT.
package configs
import "strconv"
const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy"
var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65}
func (i ProvisionerWhen) String() string {
if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) {
return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]]
}

425
configs/resource.go Normal file
View File

@ -0,0 +1,425 @@
package configs
import (
"fmt"
"github.com/hashicorp/hcl2/gohcl"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
)
// ManagedResource represents a "resource" block in a module or file.
type ManagedResource struct {
Name string
Type string
Config hcl.Body
Count hcl.Expression
ForEach hcl.Expression
ProviderConfigRef *ProviderConfigRef
DependsOn []hcl.Traversal
Connection *Connection
Provisioners []*Provisioner
CreateBeforeDestroy bool
PreventDestroy bool
IgnoreChanges []hcl.Traversal
IgnoreAllChanges bool
CreateBeforeDestroySet bool
PreventDestroySet bool
DeclRange hcl.Range
TypeRange hcl.Range
}
func (r *ManagedResource) moduleUniqueKey() string {
return fmt.Sprintf("%s.%s", r.Name, r.Type)
}
func decodeResourceBlock(block *hcl.Block) (*ManagedResource, hcl.Diagnostics) {
r := &ManagedResource{
Type: block.Labels[0],
Name: block.Labels[1],
DeclRange: block.DefRange,
TypeRange: block.LabelRanges[0],
}
content, remain, diags := block.Body.PartialContent(resourceBlockSchema)
r.Config = remain
if !hclsyntax.ValidIdentifier(r.Type) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid resource type name",
Detail: badIdentifierDetail,
Subject: &block.LabelRanges[0],
})
}
if !hclsyntax.ValidIdentifier(r.Name) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid resource name",
Detail: badIdentifierDetail,
Subject: &block.LabelRanges[0],
})
}
if attr, exists := content.Attributes["count"]; exists {
r.Count = attr.Expr
}
if attr, exists := content.Attributes["for_each"]; exists {
r.ForEach = attr.Expr
}
if attr, exists := content.Attributes["provider"]; exists {
var providerDiags hcl.Diagnostics
r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr)
diags = append(diags, providerDiags...)
}
if attr, exists := content.Attributes["depends_on"]; exists {
deps, depsDiags := decodeDependsOn(attr)
diags = append(diags, depsDiags...)
r.DependsOn = append(r.DependsOn, deps...)
}
var seenLifecycle *hcl.Block
var seenConnection *hcl.Block
for _, block := range content.Blocks {
switch block.Type {
case "lifecycle":
if seenLifecycle != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate lifecycle block",
Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange),
Subject: &block.DefRange,
})
continue
}
seenLifecycle = block
lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema)
diags = append(diags, lcDiags...)
if attr, exists := lcContent.Attributes["create_before_destroy"]; exists {
valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.CreateBeforeDestroy)
diags = append(diags, valDiags...)
r.CreateBeforeDestroySet = true
}
if attr, exists := lcContent.Attributes["prevent_destroy"]; exists {
valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.PreventDestroy)
diags = append(diags, valDiags...)
r.PreventDestroySet = true
}
if attr, exists := lcContent.Attributes["ignore_changes"]; exists {
// ignore_changes can either be a list of relative traversals
// or it can be just the keyword "all" to ignore changes to this
// resource entirely.
// ignore_changes = [ami, instance_type]
// ignore_changes = all
// We also allow two legacy forms for compatibility with earlier
// versions:
// ignore_changes = ["ami", "instance_type"]
// ignore_changes = ["*"]
kw := hcl.ExprAsKeyword(attr.Expr)
switch {
case kw == "all":
r.IgnoreAllChanges = true
default:
exprs, listDiags := hcl.ExprList(attr.Expr)
diags = append(diags, listDiags...)
var ignoreAllRange hcl.Range
for _, expr := range exprs {
// our expr might be the literal string "*", which
// we accept as a deprecated way of saying "all".
if shimIsIgnoreChangesStar(expr) {
r.IgnoreAllChanges = true
ignoreAllRange = expr.Range()
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagWarning,
Summary: "Deprecated ignore_changes wildcard",
Detail: "The [\"*\"] form of ignore_changes wildcard is reprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.",
Subject: attr.Expr.Range().Ptr(),
})
continue
}
expr, shimDiags := shimTraversalInString(expr, false)
diags = append(diags, shimDiags...)
traversal, travDiags := hcl.RelTraversalForExpr(expr)
diags = append(diags, travDiags...)
if len(traversal) != 0 {
r.IgnoreChanges = append(r.IgnoreChanges, traversal)
}
}
if r.IgnoreAllChanges && len(r.IgnoreChanges) != 0 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid ignore_changes ruleset",
Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.",
Subject: &ignoreAllRange,
Context: attr.Expr.Range().Ptr(),
})
}
}
}
case "connection":
if seenConnection != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate connection block",
Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange),
Subject: &block.DefRange,
})
continue
}
seenConnection = block
conn, connDiags := decodeConnectionBlock(block)
diags = append(diags, connDiags...)
r.Connection = conn
case "provisioner":
pv, pvDiags := decodeProvisionerBlock(block)
diags = append(diags, pvDiags...)
if pv != nil {
r.Provisioners = append(r.Provisioners, pv)
}
default:
// Should never happen, because the above cases should always be
// exhaustive for all the types specified in our schema.
continue
}
}
return r, diags
}
// DataResource represents a "data" block in a module or file.
type DataResource struct {
Name string
Type string
Config hcl.Body
Count hcl.Expression
ForEach hcl.Expression
ProviderConfigRef *ProviderConfigRef
DependsOn []hcl.Traversal
DeclRange hcl.Range
TypeRange hcl.Range
}
func (r *DataResource) moduleUniqueKey() string {
return fmt.Sprintf("data.%s.%s", r.Name, r.Type)
}
func decodeDataBlock(block *hcl.Block) (*DataResource, hcl.Diagnostics) {
r := &DataResource{
Type: block.Labels[0],
Name: block.Labels[1],
DeclRange: block.DefRange,
TypeRange: block.LabelRanges[0],
}
content, remain, diags := block.Body.PartialContent(dataBlockSchema)
r.Config = remain
if !hclsyntax.ValidIdentifier(r.Type) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid data source name",
Detail: badIdentifierDetail,
Subject: &block.LabelRanges[0],
})
}
if !hclsyntax.ValidIdentifier(r.Name) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid data resource name",
Detail: badIdentifierDetail,
Subject: &block.LabelRanges[0],
})
}
if attr, exists := content.Attributes["count"]; exists {
r.Count = attr.Expr
}
if attr, exists := content.Attributes["for_each"]; exists {
r.ForEach = attr.Expr
}
if attr, exists := content.Attributes["provider"]; exists {
var providerDiags hcl.Diagnostics
r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr)
diags = append(diags, providerDiags...)
}
if attr, exists := content.Attributes["depends_on"]; exists {
deps, depsDiags := decodeDependsOn(attr)
diags = append(diags, depsDiags...)
r.DependsOn = append(r.DependsOn, deps...)
}
for _, block := range content.Blocks {
// Our schema only allows for "lifecycle" blocks, so we can assume
// that this is all we will see here. We don't have any lifecycle
// attributes for data resources currently, so we'll just produce
// an error.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported lifecycle block",
Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.",
Subject: &block.DefRange,
})
break
}
return r, diags
}
type ProviderConfigRef struct {
Name string
NameRange hcl.Range
Alias string
AliasRange *hcl.Range // nil if alias not set
}
func decodeProviderConfigRef(attr *hcl.Attribute) (*ProviderConfigRef, hcl.Diagnostics) {
var diags hcl.Diagnostics
expr, shimDiags := shimTraversalInString(attr.Expr, false)
diags = append(diags, shimDiags...)
traversal, travDiags := hcl.AbsTraversalForExpr(expr)
// AbsTraversalForExpr produces only generic errors, so we'll discard
// the errors given and produce our own with extra context. If we didn't
// get any errors then we might still have warnings, though.
if !travDiags.HasErrors() {
diags = append(diags, travDiags...)
}
if len(traversal) < 1 || len(traversal) > 2 {
// A provider reference was given as a string literal in the legacy
// configuration language and there are lots of examples out there
// showing that usage, so we'll sniff for that situation here and
// produce a specialized error message for it to help users find
// the new correct form.
if exprIsNativeQuotedString(attr.Expr) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid provider configuration reference",
Detail: "A provider configuration reference must not be given in quotes.",
Subject: expr.Range().Ptr(),
})
return nil, diags
}
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid provider configuration reference",
Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", attr.Name),
Subject: expr.Range().Ptr(),
})
return nil, diags
}
ret := &ProviderConfigRef{
Name: traversal.RootName(),
NameRange: traversal[0].SourceRange(),
}
if len(traversal) > 1 {
aliasStep, ok := traversal[1].(hcl.TraverseAttr)
if !ok {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid provider configuration reference",
Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.",
Subject: traversal[1].SourceRange().Ptr(),
})
return ret, diags
}
ret.Alias = aliasStep.Name
ret.AliasRange = aliasStep.SourceRange().Ptr()
}
return ret, diags
}
var commonResourceAttributes = []hcl.AttributeSchema{
{
Name: "count",
},
{
Name: "for_each",
},
{
Name: "provider",
},
{
Name: "depends_on",
},
}
var resourceBlockSchema = &hcl.BodySchema{
Attributes: commonResourceAttributes,
Blocks: []hcl.BlockHeaderSchema{
{
Type: "lifecycle",
},
{
Type: "connection",
},
{
Type: "provisioner",
LabelNames: []string{"type"},
},
},
}
var dataBlockSchema = &hcl.BodySchema{
Attributes: commonResourceAttributes,
Blocks: []hcl.BlockHeaderSchema{
{
Type: "lifecycle",
},
},
}
var resourceLifecycleBlockSchema = &hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "create_before_destroy",
},
{
Name: "prevent_destroy",
},
{
Name: "ignore_changes",
},
},
}

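To illustrate the ignore_changes forms discussed in decodeResourceBlock, the hypothetical in-package sketch below decodes one resource using the traversal-list form and one using the "all" keyword; the resource names and attributes are invented.

package configs

import (
    "fmt"

    "github.com/hashicorp/hcl2/hcl"
    "github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func exampleIgnoreChanges() {
    src := `
resource "aws_instance" "a" {
  lifecycle {
    ignore_changes = [ami, instance_type] // list of relative traversals
  }
}

resource "aws_instance" "b" {
  lifecycle {
    ignore_changes = all // ignore every attribute
  }
}
`
    f, _ := hclsyntax.ParseConfig([]byte(src), "example.tf", hcl.Pos{Line: 1, Column: 1})
    content, _ := f.Body.Content(&hcl.BodySchema{
        Blocks: []hcl.BlockHeaderSchema{{Type: "resource", LabelNames: []string{"type", "name"}}},
    })

    a, _ := decodeResourceBlock(content.Blocks[0])
    b, _ := decodeResourceBlock(content.Blocks[1])
    fmt.Println(len(a.IgnoreChanges), a.IgnoreAllChanges) // expect: 2 false
    fmt.Println(len(b.IgnoreChanges), b.IgnoreAllChanges) // expect: 0 true
}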
View File

@ -0,0 +1,4 @@
module "child_c" {
source = "child_c"
}

View File

@ -0,0 +1,7 @@
module "child_c" {
# In the unit test where this fixture is used, we treat the source strings
# as absolute paths rather than as source addresses as we would in a real
# module walker.
source = "child_c"
}

View File

@ -0,0 +1,3 @@
output "hello" {
value = "hello"
}

View File

@ -0,0 +1,9 @@
module "child_a" {
source = "child_a"
}
module "child_b" {
source = "child_b"
}

View File

@ -0,0 +1,5 @@
data "example" "example" {
lifecycle {
# This block intentionally left blank
}
}

View File

@ -0,0 +1,3 @@
{
"terraform": {}
}

View File

@ -0,0 +1,2 @@
terraform {
}

View File

@ -0,0 +1,5 @@
resource "example" "example" {
lifecycle {
create_before_destroy = "ABSOLUTELY NOT"
}
}

View File

@ -0,0 +1,5 @@
resource "aws_instance" "web" {
lifecycle {
ignore_changes = ["*", "foo"]
}
}

View File

@ -0,0 +1 @@
foo = "bar"

View File

@ -0,0 +1,2 @@
varyable "whoops" {
}

View File

@ -0,0 +1,3 @@
variable "bad_type" {
type = notatype
}

View File

@ -0,0 +1,3 @@
variable "foo" {
description = "overridden"
}

View File

@ -0,0 +1,10 @@
terraform {
backend "example" {
foo = "bar"
baz {
bar = "foo"
}
}
}

View File

@ -0,0 +1,15 @@
data "http" "example1" {
}
data "http" "example2" {
url = "http://example.com/"
request_headers = {
"Accept" = "application/json"
}
count = 5
depends_on = [
data.http.example1,
]
}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1,16 @@
locals {
# This block intentionally left blank
}
locals {
foo = "foo"
bar = true
}
locals {
baz = "oink"
dunno = "🤷"
rowing = "🚣‍♀️"
π = 3.14159265359
}

View File

@ -0,0 +1,10 @@
{
"locals": {
"foo": "foo",
"bar": true,
"baz": "oink",
"dunno": "🤷",
"rowing": "🚣‍♀️",
"π": 3.14159265359
}
}

View File

@ -0,0 +1,26 @@
module "foo" {
source = "./foo"
# this block intentionally left (almost) blank
}
module "bar" {
source = "hashicorp/bar/aws"
boom = "🎆"
yes = true
}
module "baz" {
source = "git::https://example.com/"
a = 1
count = 12
for_each = ["a", "b", "c"]
depends_on = [
module.bar,
]
}

View File

@ -0,0 +1,25 @@
output "foo" {
value = "hello"
}
output "bar" {
value = local.bar
}
output "baz" {
value = "ssshhhhhhh"
sensitive = true
}
output "cheeze_pizza" {
description = "Nothing special"
value = "🍕"
}
output "π" {
value = 3.14159265359
depends_on = [
pizza.cheese,
]
}

View File

@ -0,0 +1,15 @@
provider "foo" {
}
provider "bar" {
version = ">= 1.0.2"
other = 12
}
provider "bar" {
other = 13
alias = "bar"
}

View File

@ -0,0 +1,7 @@
terraform {
required_providers {
aws = "~> 1.0.0"
consul = "~> 1.2.0"
}
}

View File

@ -0,0 +1,4 @@
terraform {
required_version = "~> 0.12.0"
}

View File

@ -0,0 +1,8 @@
resource "aws_security_group" "firewall" {
}
resource "aws_instance" "web" {
depends_on = [
"aws_security_group.firewall",
]
}

View File

@ -0,0 +1,5 @@
resource "aws_instance" "web" {
lifecycle {
ignore_changes = ["*"]
}
}

View File

@ -0,0 +1,11 @@
{
"resource": {
"aws_instance": {
"web": {
"lifecycle": {
"ignore_changes": ["*"]
}
}
}
}
}

View File

@ -0,0 +1,5 @@
resource "aws_instance" "web" {
lifecycle {
ignore_changes = all
}
}

View File

@ -0,0 +1,11 @@
{
"resource": {
"aws_instance": {
"web": {
"lifecycle": {
"ignore_changes": "all"
}
}
}
}
}

View File

@ -0,0 +1,7 @@
resource "aws_instance" "web" {
lifecycle {
ignore_changes = [
"ami",
]
}
}

View File

@ -0,0 +1,6 @@
resource "aws_security_group" "firewall" {
provisioner "local-exec" {
command = "echo hello"
on_failure = "continue"
}
}

View File

@ -0,0 +1,6 @@
resource "aws_security_group" "firewall" {
provisioner "local-exec" {
command = "echo hello"
when = "destroy"
}
}

View File

@ -0,0 +1,42 @@
resource "aws_security_group" "firewall" {
lifecycle {
create_before_destroy = true
prevent_destroy = true
ignore_changes = [
description,
]
}
connection {
host = "127.0.0.1"
}
provisioner "local-exec" {
command = "echo hello"
connection {
host = "10.1.2.1"
}
}
provisioner "local-exec" {
command = "echo hello"
}
}
resource "aws_instance" "web" {
ami = "ami-1234"
security_groups = [
"foo",
"bar",
]
network_interface {
device_index = 0
description = "Main network interface"
}
depends_on = [
aws_security_group.firewall,
]
}

View File

@ -0,0 +1,3 @@
variable "bad_type" {
type = "string"
}

View File

@ -0,0 +1,24 @@
variable "foo" {
}
variable "bar" {
default = "hello"
}
variable "baz" {
type = list
}
variable "bar-baz" {
default = []
type = list
}
variable "cheeze_pizza" {
description = "Nothing special"
}
variable "π" {
default = 3.14159265359
}

View File

@ -0,0 +1,21 @@
{
"variable": {
"foo": {},
"bar": {
"default": "hello"
},
"baz": {
"type": "list"
},
"bar-baz": {
"default": [],
"type": "list"
},
"cheese_pizza": {
"description": "Nothing special"
},
"π": {
"default": 3.14159265359
}
}
}

View File

@ -0,0 +1,2 @@
This directory is intentionally empty, to test what happens when we load
a module that contains no configuration files.

View File

@ -0,0 +1,7 @@
module "example" {
source = "./example2-a_override"
foo = "a_override foo"
new = "a_override new"
}

View File

@ -0,0 +1,5 @@
module "example" {
new = "b_override new"
newer = "b_override newer"
}

View File

@ -0,0 +1,7 @@
module "example" {
source = "./example2"
kept = "primary kept"
foo = "primary foo"
}

View File

@ -0,0 +1,3 @@
output "foo" {
sensitive = true
}

Some files were not shown because too many files have changed in this diff.